// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	200

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;
	return 0;
}

/**
 * ice_dev_onetime_setup - Temporary HW/FW workarounds
 * @hw: pointer to the HW structure
 *
 * This function provides temporary workarounds for certain issues
 * that are expected to be fixed in the HW/FW.
 */
void ice_dev_onetime_setup(struct ice_hw *hw)
{
#define MBX_PF_VT_PFALLOC	0x00231E80
	/* set VFs per PF */
	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_aq_discover_caps is expected to be called before this function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
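
/* Usage sketch (illustrative only, not part of the driver flow): size the
 * buffer for ice_aq_manage_mac_read() for the two (LAN and WoL) addresses a
 * single port can report. On success the parsed LAN/permanent addresses are
 * stored in hw->port_info->mac by the function above. The "example_" helper
 * name is hypothetical.
 */
static __maybe_unused enum ice_status example_read_port_mac(struct ice_hw *hw)
{
	struct ice_aqc_manage_mac_read_resp buf[2];

	return ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL);
}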

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
	}

	return status;
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
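
/* Usage sketch (hypothetical helper, not used by the driver): refresh the
 * cached link status and report whether the link is up. ICE_AQ_LINK_UP is
 * the "link up" bit reported in link_info; passing true for ena_lse also
 * (re)enables Link Status Event reporting.
 */
static __maybe_unused enum ice_status
example_link_is_up(struct ice_port_info *pi, bool *link_up)
{
	struct ice_link_status link;
	enum ice_status status;

	status = ice_aq_get_link_info(pi, true, &link, NULL);
	if (status)
		return status;

	*link_up = !!(link.link_info & ICE_AQ_LINK_UP);
	return 0;
}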

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	return ice_init_def_sw_recp(hw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
#define ICE_FW_LOG_DESC_SIZE_MAX	\
	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aqc_fw_logging_data *config;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 size;

	size = ICE_FW_LOG_DESC_SIZE_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging_data *data = NULL;
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kzalloc(ice_hw_to_dev(hw),
						    ICE_FW_LOG_DESC_SIZE_MAX,
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data->entry[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = ICE_FW_LOG_DESC_SIZE(chgs);
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
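
/* Usage sketch (hypothetical, illustrative only): enable FW logging over the
 * Rx CQ for every module before device initialization, per the procedure
 * described above. The caller picks the transport via the "cq_en"/"uart_en"
 * bits and the per-module severities via the "cfg" masks; ice_cfg_fw_log()
 * then pushes only the entries that differ from the current configuration.
 */
static __maybe_unused enum ice_status example_enable_fw_log(struct ice_hw *hw)
{
	u16 i;

	hw->fw_log.cq_en = true;
	for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
		hw->fw_log.evnts[i].cfg = ICE_AQC_FW_LOG_EN_M >>
					  ICE_AQC_FW_LOG_EN_S;

	return ice_cfg_fw_log(hw, true);
}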

/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran - determine int/intrl granularity
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/intrl granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_get_nvm_version - get cached NVM version data
 * @hw: pointer to the hardware structure
 * @oem_ver: 8 bit NVM version
 * @oem_build: 16 bit NVM build number
 * @oem_patch: 8 bit NVM patch number
 * @ver_hi: high 16 bits of the NVM version
 * @ver_lo: low 16 bits of the NVM version
 */
void
ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
		    u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
{
	struct ice_nvm_info *nvm = &hw->nvm;

	*oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
	*oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
	*oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
			   ICE_OEM_VER_BUILD_SHIFT);
	*ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	*ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
}
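
/* Usage sketch (hypothetical helper): turn the packed fields decoded by
 * ice_get_nvm_version() into a human readable string. hw->nvm.eetrack is
 * assumed to hold the NVM EETRACK ID cached by ice_init_nvm().
 */
static __maybe_unused void
example_nvm_ver_str(struct ice_hw *hw, char *buf, size_t len)
{
	u8 oem_ver, oem_patch, ver_hi, ver_lo;
	u16 oem_build;

	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
			    &ver_lo);
	snprintf(buf, len, "%x.%02x 0x%08x %d.%d.%d", ver_hi, ver_lo,
		 hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
}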

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	ice_dev_onetime_setup(hw);

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
 * @hw: pointer to hardware structure
 * @module_tlv: pointer to module TLV to return
 * @module_tlv_len: pointer to module TLV length to return
 * @module_type: module type requested
 *
 * Finds the requested sub module TLV type from the Preserved Field
 * Area (PFA) and returns the TLV pointer and length. The caller can
 * use these to read the variable length TLV value.
 */
enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
		       u16 module_type)
{
	enum ice_status status;
	u16 pfa_len, pfa_ptr;
	u16 next_tlv;

	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read Preserved Field Array pointer.\n");
		return status;
	}
	status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
		return status;
	}
	/* Starting with first TLV after PFA length, iterate through the list
	 * of TLVs to find the requested one.
	 */
	next_tlv = pfa_ptr + 1;
	while (next_tlv < pfa_ptr + pfa_len) {
		u16 tlv_sub_module_type;
		u16 tlv_len;

		/* Read TLV type */
		status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
			break;
		}
		/* Read TLV length */
		status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
			break;
		}
		if (tlv_sub_module_type == module_type) {
			if (tlv_len) {
				*module_tlv = next_tlv;
				*module_tlv_len = tlv_len;
				return 0;
			}
			return ICE_ERR_INVAL_SIZE;
		}
		/* Check next TLV, i.e. current TLV pointer + length + 2 words
		 * (for current TLV's type and length)
		 */
		next_tlv = next_tlv + tlv_len + 2;
	}
	/* Module does not exist */
	return ICE_ERR_DOES_NOT_EXIST;
}
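
/* Usage sketch (hypothetical module type): locate a PFA TLV and read its
 * first data word with the same Shadow RAM helper used above. The TLV layout
 * is [type][length][data...], so the data starts two words past the TLV
 * pointer returned by ice_get_pfa_module_tlv().
 */
static __maybe_unused enum ice_status
example_read_pfa_tlv_word(struct ice_hw *hw, u16 module_type, u16 *word)
{
	u16 module_tlv, module_tlv_len;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &module_tlv, &module_tlv_len,
					module_type);
	if (status)
		return status;

	return ice_read_sr_word(hw, module_tlv + 2, word);
}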

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
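
/* Usage sketch (hypothetical values, not the driver's real queue setup):
 * program a minimal Rx queue context. Field meanings follow the
 * ice_rlan_ctx_info[] table above; base and dbuf are assumed to be expressed
 * in 128-byte units.
 */
static __maybe_unused enum ice_status
example_setup_rxq_ctx(struct ice_hw *hw, u64 ring_dma, u32 rxq_index)
{
	struct ice_rlan_ctx rlan_ctx = { 0 };

	rlan_ctx.base = ring_dma >> 7;	/* 128-byte granularity */
	rlan_ctx.qlen = 512;		/* descriptor count */
	rlan_ctx.dbuf = 2048 >> 7;	/* 2 KB Rx data buffers */
	rlan_ctx.dsize = 1;		/* 32-byte descriptors */

	return ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
}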

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List and Release Resource
	 * (with resource ID set to Global Config Lock) AdminQ commands are
	 * allowed; all others must block until the package download completes
	 * and the Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		/* fall-through */
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
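
/* Usage sketch (hypothetical): the acquire/work/release pattern these two
 * helpers are meant for. ICE_NVM_RES_ID and ICE_RES_READ are the existing
 * resource ID/access type enums; the 3000 ms timeout is just an example
 * value.
 */
static __maybe_unused enum ice_status example_with_nvm_lock(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
	if (status)
		return status;

	/* ... read Shadow RAM words while holding the resource ... */

	ice_release_res(hw, ICE_NVM_RES_ID);
	return 0;
}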

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num - 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	memcpy(res, buf->elem, sizeof(buf->elem) * num);

ice_alloc_res_exit:
	kfree(buf);
	return status;
}

/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num - 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type);
	memcpy(buf->elem, res, sizeof(buf->elem) * num);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	kfree(buf);
	return status;
}

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}

/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	char const *prefix;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
		prefix = "dev cap";
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
		prefix = "func cap";
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			caps->valid_functions = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: valid_functions (bitmap) = %d\n", prefix,
				  caps->valid_functions);

			/* store func count for resource management purposes */
			if (dev_p)
				dev_p->num_funcs = hweight32(number);
			break;
		case ICE_AQC_CAPS_SRIOV:
			caps->sr_iov_1_1 = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: sr_iov_1_1 = %d\n", prefix,
				  caps->sr_iov_1_1);
			break;
		case ICE_AQC_CAPS_VF:
			if (dev_p) {
				dev_p->num_vfs_exposed = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vfs_exposed = %d\n", prefix,
					  dev_p->num_vfs_exposed);
			} else if (func_p) {
				func_p->num_allocd_vfs = number;
				func_p->vf_base_id = logical_id;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_allocd_vfs = %d\n", prefix,
					  func_p->num_allocd_vfs);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: vf_base_id = %d\n", prefix,
					  func_p->vf_base_id);
			}
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vsi_allocd_to_host = %d\n",
					  prefix,
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guar_num_vsi =
					ice_get_num_per_func(hw, ICE_MAX_VSI);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi (fw) = %d\n",
					  prefix, number);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi = %d\n",
					  prefix, func_p->guar_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_DCB:
			caps->dcb = (number == 1);
			caps->active_tc_bitmap = logical_id;
			caps->maxtc = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: dcb = %d\n", prefix, caps->dcb);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: active_tc_bitmap = %d\n", prefix,
				  caps->active_tc_bitmap);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: maxtc = %d\n", prefix, caps->maxtc);
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_size = %d\n", prefix,
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_entry_width = %d\n", prefix,
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_rxq = %d\n", prefix,
				  caps->num_rxq);
1755 ice_debug(hw, ICE_DBG_INIT, 1756 "%s: rxq_first_id = %d\n", prefix, 1757 caps->rxq_first_id); 1758 break; 1759 case ICE_AQC_CAPS_TXQS: 1760 caps->num_txq = number; 1761 caps->txq_first_id = phys_id; 1762 ice_debug(hw, ICE_DBG_INIT, 1763 "%s: num_txq = %d\n", prefix, 1764 caps->num_txq); 1765 ice_debug(hw, ICE_DBG_INIT, 1766 "%s: txq_first_id = %d\n", prefix, 1767 caps->txq_first_id); 1768 break; 1769 case ICE_AQC_CAPS_MSIX: 1770 caps->num_msix_vectors = number; 1771 caps->msix_vector_first_id = phys_id; 1772 ice_debug(hw, ICE_DBG_INIT, 1773 "%s: num_msix_vectors = %d\n", prefix, 1774 caps->num_msix_vectors); 1775 ice_debug(hw, ICE_DBG_INIT, 1776 "%s: msix_vector_first_id = %d\n", prefix, 1777 caps->msix_vector_first_id); 1778 break; 1779 case ICE_AQC_CAPS_MAX_MTU: 1780 caps->max_mtu = number; 1781 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 1782 prefix, caps->max_mtu); 1783 break; 1784 default: 1785 ice_debug(hw, ICE_DBG_INIT, 1786 "%s: unknown capability[%d]: 0x%x\n", prefix, 1787 i, cap); 1788 break; 1789 } 1790 } 1791 1792 /* Re-calculate capabilities that are dependent on the number of 1793 * physical ports; i.e. some features are not supported or function 1794 * differently on devices with more than 4 ports. 1795 */ 1796 if (hw->dev_caps.num_funcs > 4) { 1797 /* Max 4 TCs per port */ 1798 caps->maxtc = 4; 1799 ice_debug(hw, ICE_DBG_INIT, 1800 "%s: maxtc = %d (based on #ports)\n", prefix, 1801 caps->maxtc); 1802 } 1803 } 1804 1805 /** 1806 * ice_aq_discover_caps - query function/device capabilities 1807 * @hw: pointer to the HW struct 1808 * @buf: a virtual buffer to hold the capabilities 1809 * @buf_size: Size of the virtual buffer 1810 * @cap_count: cap count needed if AQ err==ENOMEM 1811 * @opc: capabilities type to discover - pass in the command opcode 1812 * @cd: pointer to command details structure or NULL 1813 * 1814 * Get the function(0x000a)/device(0x000b) capabilities description from 1815 * the firmware. 1816 */ 1817 static enum ice_status 1818 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 1819 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 1820 { 1821 struct ice_aqc_list_caps *cmd; 1822 struct ice_aq_desc desc; 1823 enum ice_status status; 1824 1825 cmd = &desc.params.get_cap; 1826 1827 if (opc != ice_aqc_opc_list_func_caps && 1828 opc != ice_aqc_opc_list_dev_caps) 1829 return ICE_ERR_PARAM; 1830 1831 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1832 1833 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 1834 if (!status) 1835 ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc); 1836 else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM) 1837 *cap_count = le32_to_cpu(cmd->count); 1838 return status; 1839 } 1840 1841 /** 1842 * ice_discover_caps - get info about the HW 1843 * @hw: pointer to the hardware structure 1844 * @opc: capabilities type to discover - pass in the command opcode 1845 */ 1846 static enum ice_status 1847 ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) 1848 { 1849 enum ice_status status; 1850 u32 cap_count; 1851 u16 cbuf_len; 1852 u8 retries; 1853 1854 /* The driver doesn't know how many capabilities the device will return 1855 * so the buffer size required isn't known ahead of time. The driver 1856 * starts with cbuf_len and if this turns out to be insufficient, the 1857 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs. 1858 * The driver then allocates the buffer based on the count and retries 1859 * the operation. So it follows that the retry count is 2. 
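	 *
	 * Worked example (the second count is hypothetical): the first pass
	 * allocates ICE_GET_CAP_BUF_COUNT (40) elements; if firmware answers
	 * ICE_AQ_RC_ENOMEM and reports that, say, 60 records exist, the second
	 * and final pass allocates 60 * sizeof(struct ice_aqc_list_caps_elem)
	 * bytes and resends the command.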
1860 */ 1861 #define ICE_GET_CAP_BUF_COUNT 40 1862 #define ICE_GET_CAP_RETRY_COUNT 2 1863 1864 cap_count = ICE_GET_CAP_BUF_COUNT; 1865 retries = ICE_GET_CAP_RETRY_COUNT; 1866 1867 do { 1868 void *cbuf; 1869 1870 cbuf_len = (u16)(cap_count * 1871 sizeof(struct ice_aqc_list_caps_elem)); 1872 cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL); 1873 if (!cbuf) 1874 return ICE_ERR_NO_MEMORY; 1875 1876 status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count, 1877 opc, NULL); 1878 devm_kfree(ice_hw_to_dev(hw), cbuf); 1879 1880 if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM) 1881 break; 1882 1883 /* If ENOMEM is returned, try again with bigger buffer */ 1884 } while (--retries); 1885 1886 return status; 1887 } 1888 1889 /** 1890 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 1891 * @hw: pointer to the hardware structure 1892 */ 1893 void ice_set_safe_mode_caps(struct ice_hw *hw) 1894 { 1895 struct ice_hw_func_caps *func_caps = &hw->func_caps; 1896 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 1897 u32 valid_func, rxq_first_id, txq_first_id; 1898 u32 msix_vector_first_id, max_mtu; 1899 u32 num_funcs; 1900 1901 /* cache some func_caps values that should be restored after memset */ 1902 valid_func = func_caps->common_cap.valid_functions; 1903 txq_first_id = func_caps->common_cap.txq_first_id; 1904 rxq_first_id = func_caps->common_cap.rxq_first_id; 1905 msix_vector_first_id = func_caps->common_cap.msix_vector_first_id; 1906 max_mtu = func_caps->common_cap.max_mtu; 1907 1908 /* unset func capabilities */ 1909 memset(func_caps, 0, sizeof(*func_caps)); 1910 1911 /* restore cached values */ 1912 func_caps->common_cap.valid_functions = valid_func; 1913 func_caps->common_cap.txq_first_id = txq_first_id; 1914 func_caps->common_cap.rxq_first_id = rxq_first_id; 1915 func_caps->common_cap.msix_vector_first_id = msix_vector_first_id; 1916 func_caps->common_cap.max_mtu = max_mtu; 1917 1918 /* one Tx and one Rx queue in safe mode */ 1919 func_caps->common_cap.num_rxq = 1; 1920 func_caps->common_cap.num_txq = 1; 1921 1922 /* two MSIX vectors, one for traffic and one for misc causes */ 1923 func_caps->common_cap.num_msix_vectors = 2; 1924 func_caps->guar_num_vsi = 1; 1925 1926 /* cache some dev_caps values that should be restored after memset */ 1927 valid_func = dev_caps->common_cap.valid_functions; 1928 txq_first_id = dev_caps->common_cap.txq_first_id; 1929 rxq_first_id = dev_caps->common_cap.rxq_first_id; 1930 msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id; 1931 max_mtu = dev_caps->common_cap.max_mtu; 1932 num_funcs = dev_caps->num_funcs; 1933 1934 /* unset dev capabilities */ 1935 memset(dev_caps, 0, sizeof(*dev_caps)); 1936 1937 /* restore cached values */ 1938 dev_caps->common_cap.valid_functions = valid_func; 1939 dev_caps->common_cap.txq_first_id = txq_first_id; 1940 dev_caps->common_cap.rxq_first_id = rxq_first_id; 1941 dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id; 1942 dev_caps->common_cap.max_mtu = max_mtu; 1943 dev_caps->num_funcs = num_funcs; 1944 1945 /* one Tx and one Rx queue per function in safe mode */ 1946 dev_caps->common_cap.num_rxq = num_funcs; 1947 dev_caps->common_cap.num_txq = num_funcs; 1948 1949 /* two MSIX vectors per function */ 1950 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 1951 } 1952 1953 /** 1954 * ice_get_caps - get info about the HW 1955 * @hw: pointer to the hardware structure 1956 */ 1957 enum ice_status ice_get_caps(struct ice_hw *hw) 1958 { 1959 enum ice_status 
status; 1960 1961 status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps); 1962 if (!status) 1963 status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps); 1964 1965 return status; 1966 } 1967 1968 /** 1969 * ice_aq_manage_mac_write - manage MAC address write command 1970 * @hw: pointer to the HW struct 1971 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 1972 * @flags: flags to control write behavior 1973 * @cd: pointer to command details structure or NULL 1974 * 1975 * This function is used to write MAC address to the NVM (0x0108). 1976 */ 1977 enum ice_status 1978 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 1979 struct ice_sq_cd *cd) 1980 { 1981 struct ice_aqc_manage_mac_write *cmd; 1982 struct ice_aq_desc desc; 1983 1984 cmd = &desc.params.mac_write; 1985 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 1986 1987 cmd->flags = flags; 1988 1989 /* Prep values for flags, sah, sal */ 1990 cmd->sah = htons(*((const u16 *)mac_addr)); 1991 cmd->sal = htonl(*((const u32 *)(mac_addr + 2))); 1992 1993 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1994 } 1995 1996 /** 1997 * ice_aq_clear_pxe_mode 1998 * @hw: pointer to the HW struct 1999 * 2000 * Tell the firmware that the driver is taking over from PXE (0x0110). 2001 */ 2002 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) 2003 { 2004 struct ice_aq_desc desc; 2005 2006 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 2007 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 2008 2009 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2010 } 2011 2012 /** 2013 * ice_clear_pxe_mode - clear pxe operations mode 2014 * @hw: pointer to the HW struct 2015 * 2016 * Make sure all PXE mode settings are cleared, including things 2017 * like descriptor fetch/write-back mode. 2018 */ 2019 void ice_clear_pxe_mode(struct ice_hw *hw) 2020 { 2021 if (ice_check_sq_alive(hw, &hw->adminq)) 2022 ice_aq_clear_pxe_mode(hw); 2023 } 2024 2025 /** 2026 * ice_get_link_speed_based_on_phy_type - returns link speed 2027 * @phy_type_low: lower part of phy_type 2028 * @phy_type_high: higher part of phy_type 2029 * 2030 * This helper function will convert an entry in PHY type structure 2031 * [phy_type_low, phy_type_high] to its corresponding link speed. 2032 * Note: In the structure of [phy_type_low, phy_type_high], there should 2033 * be one bit set, as this function will convert one PHY type to its 2034 * speed. 
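 * For example, a phy_type_low equal to ICE_PHY_TYPE_LOW_10GBASE_SR (a single
 * bit) with a phy_type_high of 0 maps to ICE_AQ_LINK_SPEED_10GB.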
2035 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned 2036 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned 2037 */ 2038 static u16 2039 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 2040 { 2041 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 2042 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 2043 2044 switch (phy_type_low) { 2045 case ICE_PHY_TYPE_LOW_100BASE_TX: 2046 case ICE_PHY_TYPE_LOW_100M_SGMII: 2047 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 2048 break; 2049 case ICE_PHY_TYPE_LOW_1000BASE_T: 2050 case ICE_PHY_TYPE_LOW_1000BASE_SX: 2051 case ICE_PHY_TYPE_LOW_1000BASE_LX: 2052 case ICE_PHY_TYPE_LOW_1000BASE_KX: 2053 case ICE_PHY_TYPE_LOW_1G_SGMII: 2054 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 2055 break; 2056 case ICE_PHY_TYPE_LOW_2500BASE_T: 2057 case ICE_PHY_TYPE_LOW_2500BASE_X: 2058 case ICE_PHY_TYPE_LOW_2500BASE_KX: 2059 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 2060 break; 2061 case ICE_PHY_TYPE_LOW_5GBASE_T: 2062 case ICE_PHY_TYPE_LOW_5GBASE_KR: 2063 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 2064 break; 2065 case ICE_PHY_TYPE_LOW_10GBASE_T: 2066 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 2067 case ICE_PHY_TYPE_LOW_10GBASE_SR: 2068 case ICE_PHY_TYPE_LOW_10GBASE_LR: 2069 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 2070 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 2071 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 2072 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 2073 break; 2074 case ICE_PHY_TYPE_LOW_25GBASE_T: 2075 case ICE_PHY_TYPE_LOW_25GBASE_CR: 2076 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 2077 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 2078 case ICE_PHY_TYPE_LOW_25GBASE_SR: 2079 case ICE_PHY_TYPE_LOW_25GBASE_LR: 2080 case ICE_PHY_TYPE_LOW_25GBASE_KR: 2081 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 2082 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 2083 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 2084 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 2085 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 2086 break; 2087 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 2088 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 2089 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 2090 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 2091 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 2092 case ICE_PHY_TYPE_LOW_40G_XLAUI: 2093 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 2094 break; 2095 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 2096 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 2097 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 2098 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 2099 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 2100 case ICE_PHY_TYPE_LOW_50G_LAUI2: 2101 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 2102 case ICE_PHY_TYPE_LOW_50G_AUI2: 2103 case ICE_PHY_TYPE_LOW_50GBASE_CP: 2104 case ICE_PHY_TYPE_LOW_50GBASE_SR: 2105 case ICE_PHY_TYPE_LOW_50GBASE_FR: 2106 case ICE_PHY_TYPE_LOW_50GBASE_LR: 2107 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 2108 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 2109 case ICE_PHY_TYPE_LOW_50G_AUI1: 2110 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 2111 break; 2112 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 2113 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 2114 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 2115 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 2116 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 2117 case ICE_PHY_TYPE_LOW_100G_CAUI4: 2118 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 2119 case ICE_PHY_TYPE_LOW_100G_AUI4: 2120 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 2121 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 2122 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 2123 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 2124 case ICE_PHY_TYPE_LOW_100GBASE_DR: 2125 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 2126 break; 
2127 default: 2128 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 2129 break; 2130 } 2131 2132 switch (phy_type_high) { 2133 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 2134 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 2135 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 2136 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 2137 case ICE_PHY_TYPE_HIGH_100G_AUI2: 2138 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; 2139 break; 2140 default: 2141 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 2142 break; 2143 } 2144 2145 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN && 2146 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 2147 return ICE_AQ_LINK_SPEED_UNKNOWN; 2148 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 2149 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) 2150 return ICE_AQ_LINK_SPEED_UNKNOWN; 2151 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 2152 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 2153 return speed_phy_type_low; 2154 else 2155 return speed_phy_type_high; 2156 } 2157 2158 /** 2159 * ice_update_phy_type 2160 * @phy_type_low: pointer to the lower part of phy_type 2161 * @phy_type_high: pointer to the higher part of phy_type 2162 * @link_speeds_bitmap: targeted link speeds bitmap 2163 * 2164 * Note: For the link_speeds_bitmap structure, you can check it at 2165 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a 2166 * link_speeds_bitmap that includes multiple speeds. 2167 * 2168 * Each entry in this [phy_type_low, phy_type_high] structure will 2169 * represent a certain link speed. This helper function will turn on bits 2170 * in [phy_type_low, phy_type_high] structure based on the value of 2171 * link_speeds_bitmap input parameter. 2172 */ 2173 void 2174 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 2175 u16 link_speeds_bitmap) 2176 { 2177 u64 pt_high; 2178 u64 pt_low; 2179 int index; 2180 u16 speed; 2181 2182 /* We first check with low part of phy_type */ 2183 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 2184 pt_low = BIT_ULL(index); 2185 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 2186 2187 if (link_speeds_bitmap & speed) 2188 *phy_type_low |= BIT_ULL(index); 2189 } 2190 2191 /* We then check with high part of phy_type */ 2192 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 2193 pt_high = BIT_ULL(index); 2194 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 2195 2196 if (link_speeds_bitmap & speed) 2197 *phy_type_high |= BIT_ULL(index); 2198 } 2199 } 2200 2201 /** 2202 * ice_aq_set_phy_cfg 2203 * @hw: pointer to the HW struct 2204 * @lport: logical port number 2205 * @cfg: structure with PHY configuration data to be set 2206 * @cd: pointer to command details structure or NULL 2207 * 2208 * Set the various PHY configuration parameters supported on the Port. 2209 * One or more of the Set PHY config parameters may be ignored in an MFP 2210 * mode as the PF may not have the privilege to set some of the PHY Config 2211 * parameters. This status will be indicated by the command response (0x0601). 2212 */ 2213 enum ice_status 2214 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, 2215 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 2216 { 2217 struct ice_aq_desc desc; 2218 2219 if (!cfg) 2220 return ICE_ERR_PARAM; 2221 2222 /* Ensure that only valid bits of cfg->caps can be turned on.
*/ 2223 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 2224 ice_debug(hw, ICE_DBG_PHY, 2225 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 2226 cfg->caps); 2227 2228 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 2229 } 2230 2231 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 2232 desc.params.set_phy.lport_num = lport; 2233 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2234 2235 ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", 2236 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 2237 ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", 2238 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 2239 ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps); 2240 ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n", 2241 cfg->low_power_ctrl); 2242 ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap); 2243 ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value); 2244 ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt); 2245 2246 return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 2247 } 2248 2249 /** 2250 * ice_update_link_info - update status of the HW network link 2251 * @pi: port info structure of the interested logical port 2252 */ 2253 enum ice_status ice_update_link_info(struct ice_port_info *pi) 2254 { 2255 struct ice_link_status *li; 2256 enum ice_status status; 2257 2258 if (!pi) 2259 return ICE_ERR_PARAM; 2260 2261 li = &pi->phy.link_info; 2262 2263 status = ice_aq_get_link_info(pi, true, NULL, NULL); 2264 if (status) 2265 return status; 2266 2267 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 2268 struct ice_aqc_get_phy_caps_data *pcaps; 2269 struct ice_hw *hw; 2270 2271 hw = pi->hw; 2272 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), 2273 GFP_KERNEL); 2274 if (!pcaps) 2275 return ICE_ERR_NO_MEMORY; 2276 2277 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 2278 pcaps, NULL); 2279 if (!status) 2280 memcpy(li->module_type, &pcaps->module_type, 2281 sizeof(li->module_type)); 2282 2283 devm_kfree(ice_hw_to_dev(hw), pcaps); 2284 } 2285 2286 return status; 2287 } 2288 2289 /** 2290 * ice_set_fc 2291 * @pi: port information structure 2292 * @aq_failures: pointer to status code, specific to ice_set_fc routine 2293 * @ena_auto_link_update: enable automatic link update 2294 * 2295 * Set the requested flow control mode. 
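 *
 * Usage sketch (illustrative only; the surrounding error handling is an
 * assumption, not part of the driver):
 *
 *	u8 aq_failures;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	if (ice_set_fc(pi, &aq_failures, true) &&
 *	    aq_failures == ICE_SET_FC_AQ_FAIL_SET)
 *		... the Set PHY config (0x0601) command itself failed ...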
2296 */ 2297 enum ice_status 2298 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 2299 { 2300 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 2301 struct ice_aqc_get_phy_caps_data *pcaps; 2302 enum ice_status status; 2303 u8 pause_mask = 0x0; 2304 struct ice_hw *hw; 2305 2306 if (!pi) 2307 return ICE_ERR_PARAM; 2308 hw = pi->hw; 2309 *aq_failures = ICE_SET_FC_AQ_FAIL_NONE; 2310 2311 switch (pi->fc.req_mode) { 2312 case ICE_FC_FULL: 2313 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 2314 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 2315 break; 2316 case ICE_FC_RX_PAUSE: 2317 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 2318 break; 2319 case ICE_FC_TX_PAUSE: 2320 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 2321 break; 2322 default: 2323 break; 2324 } 2325 2326 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 2327 if (!pcaps) 2328 return ICE_ERR_NO_MEMORY; 2329 2330 /* Get the current PHY config */ 2331 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, 2332 NULL); 2333 if (status) { 2334 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 2335 goto out; 2336 } 2337 2338 /* clear the old pause settings */ 2339 cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 2340 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 2341 2342 /* set the new capabilities */ 2343 cfg.caps |= pause_mask; 2344 2345 /* If the capabilities have changed, then set the new config */ 2346 if (cfg.caps != pcaps->caps) { 2347 int retry_count, retry_max = 10; 2348 2349 /* Auto restart link so settings take effect */ 2350 if (ena_auto_link_update) 2351 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2352 /* Copy over all the old settings */ 2353 cfg.phy_type_high = pcaps->phy_type_high; 2354 cfg.phy_type_low = pcaps->phy_type_low; 2355 cfg.low_power_ctrl = pcaps->low_power_ctrl; 2356 cfg.eee_cap = pcaps->eee_cap; 2357 cfg.eeer_value = pcaps->eeer_value; 2358 cfg.link_fec_opt = pcaps->link_fec_options; 2359 2360 status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL); 2361 if (status) { 2362 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 2363 goto out; 2364 } 2365 2366 /* Update the link info 2367 * It sometimes takes a really long time for link to 2368 * come back from the atomic reset. Thus, we wait a 2369 * little bit. 
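		 * With retry_max at 10 and a 100 ms delay per attempt, this
		 * waits for up to roughly one second in total.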
*/ 2371 for (retry_count = 0; retry_count < retry_max; retry_count++) { 2372 status = ice_update_link_info(pi); 2373 2374 if (!status) 2375 break; 2376 2377 mdelay(100); 2378 } 2379 2380 if (status) 2381 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 2382 } 2383 2384 out: 2385 devm_kfree(ice_hw_to_dev(hw), pcaps); 2386 return status; 2387 } 2388 2389 /** 2390 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 2391 * @caps: PHY ability structure to copy data from 2392 * @cfg: PHY configuration structure to copy data to 2393 * 2394 * Helper function to copy AQC PHY get ability data to PHY set configuration 2395 * data structure 2396 */ 2397 void 2398 ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, 2399 struct ice_aqc_set_phy_cfg_data *cfg) 2400 { 2401 if (!caps || !cfg) 2402 return; 2403 2404 cfg->phy_type_low = caps->phy_type_low; 2405 cfg->phy_type_high = caps->phy_type_high; 2406 cfg->caps = caps->caps; 2407 cfg->low_power_ctrl = caps->low_power_ctrl; 2408 cfg->eee_cap = caps->eee_cap; 2409 cfg->eeer_value = caps->eeer_value; 2410 cfg->link_fec_opt = caps->link_fec_options; 2411 } 2412 2413 /** 2414 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 2415 * @cfg: PHY configuration data to set FEC mode 2416 * @fec: FEC mode to configure 2417 * 2418 * Before calling, the caller should copy the ICE_AQC_PHY_EN_AUTO_FEC bit 2419 * (bit 7) of ice_aqc_get_phy_caps_data.caps into cfg.caps (as 2420 * ICE_AQ_PHY_ENA_AUTO_FEC) and ice_aqc_get_phy_caps_data.link_fec_options into cfg.link_fec_opt. 2421 */ 2422 void 2423 ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) 2424 { 2425 switch (fec) { 2426 case ICE_FEC_BASER: 2427 /* Clear RS bits, and AND BASE-R ability 2428 * bits and OR request bits. 2429 */ 2430 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 2431 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 2432 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 2433 ICE_AQC_PHY_FEC_25G_KR_REQ; 2434 break; 2435 case ICE_FEC_RS: 2436 /* Clear BASE-R bits, and AND RS ability 2437 * bits and OR request bits. 2438 */ 2439 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 2440 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 2441 ICE_AQC_PHY_FEC_25G_RS_544_REQ; 2442 break; 2443 case ICE_FEC_NONE: 2444 /* Clear all FEC option bits. */ 2445 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; 2446 break; 2447 case ICE_FEC_AUTO: 2448 /* AND auto FEC bit, and all caps bits. */ 2449 cfg->caps &= ICE_AQC_PHY_CAPS_MASK; 2450 break; 2451 } 2452 } 2453 2454 /** 2455 * ice_get_link_status - get status of the HW network link 2456 * @pi: port information structure 2457 * @link_up: pointer to bool (true/false = linkup/linkdown) 2458 * 2459 * Variable link_up is true if link is up, false if link is down. 2460 * The variable link_up is invalid if status is non-zero.
As a 2461 * result of this call, link status reporting becomes enabled 2462 */ 2463 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) 2464 { 2465 struct ice_phy_info *phy_info; 2466 enum ice_status status = 0; 2467 2468 if (!pi || !link_up) 2469 return ICE_ERR_PARAM; 2470 2471 phy_info = &pi->phy; 2472 2473 if (phy_info->get_link_info) { 2474 status = ice_update_link_info(pi); 2475 2476 if (status) 2477 ice_debug(pi->hw, ICE_DBG_LINK, 2478 "get link status error, status = %d\n", 2479 status); 2480 } 2481 2482 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 2483 2484 return status; 2485 } 2486 2487 /** 2488 * ice_aq_set_link_restart_an 2489 * @pi: pointer to the port information structure 2490 * @ena_link: if true: enable link, if false: disable link 2491 * @cd: pointer to command details structure or NULL 2492 * 2493 * Sets up the link and restarts the Auto-Negotiation over the link. 2494 */ 2495 enum ice_status 2496 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 2497 struct ice_sq_cd *cd) 2498 { 2499 struct ice_aqc_restart_an *cmd; 2500 struct ice_aq_desc desc; 2501 2502 cmd = &desc.params.restart_an; 2503 2504 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 2505 2506 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 2507 cmd->lport_num = pi->lport; 2508 if (ena_link) 2509 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 2510 else 2511 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 2512 2513 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 2514 } 2515 2516 /** 2517 * ice_aq_set_event_mask 2518 * @hw: pointer to the HW struct 2519 * @port_num: port number of the physical function 2520 * @mask: event mask to be set 2521 * @cd: pointer to command details structure or NULL 2522 * 2523 * Set event mask (0x0613) 2524 */ 2525 enum ice_status 2526 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 2527 struct ice_sq_cd *cd) 2528 { 2529 struct ice_aqc_set_event_mask *cmd; 2530 struct ice_aq_desc desc; 2531 2532 cmd = &desc.params.set_event_mask; 2533 2534 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 2535 2536 cmd->lport_num = port_num; 2537 2538 cmd->event_mask = cpu_to_le16(mask); 2539 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2540 } 2541 2542 /** 2543 * ice_aq_set_mac_loopback 2544 * @hw: pointer to the HW struct 2545 * @ena_lpbk: Enable or Disable loopback 2546 * @cd: pointer to command details structure or NULL 2547 * 2548 * Enable/disable loopback on a given port 2549 */ 2550 enum ice_status 2551 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 2552 { 2553 struct ice_aqc_set_mac_lb *cmd; 2554 struct ice_aq_desc desc; 2555 2556 cmd = &desc.params.set_mac_lb; 2557 2558 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 2559 if (ena_lpbk) 2560 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 2561 2562 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2563 } 2564 2565 /** 2566 * ice_aq_set_port_id_led 2567 * @pi: pointer to the port information 2568 * @is_orig_mode: is this LED set to original mode (by the net-list) 2569 * @cd: pointer to command details structure or NULL 2570 * 2571 * Set LED value for the given port (0x06e9) 2572 */ 2573 enum ice_status 2574 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 2575 struct ice_sq_cd *cd) 2576 { 2577 struct ice_aqc_set_port_id_led *cmd; 2578 struct ice_hw *hw = pi->hw; 2579 struct ice_aq_desc desc; 2580 2581 cmd = &desc.params.set_port_id_led; 2582 2583 ice_fill_dflt_direct_cmd_desc(&desc, 
ice_aqc_opc_set_port_id_led); 2584 2585 if (is_orig_mode) 2586 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 2587 else 2588 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 2589 2590 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2591 } 2592 2593 /** 2594 * ice_aq_sff_eeprom 2595 * @hw: pointer to the HW struct 2596 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 2597 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 2598 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 2599 * @page: QSFP page 2600 * @set_page: set or ignore the page 2601 * @data: pointer to data buffer to be read/written to the I2C device. 2602 * @length: 1-16 for read, 1 for write. 2603 * @write: 0 read, 1 for write. 2604 * @cd: pointer to command details structure or NULL 2605 * 2606 * Read/Write SFF EEPROM (0x06EE) 2607 */ 2608 enum ice_status 2609 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 2610 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 2611 bool write, struct ice_sq_cd *cd) 2612 { 2613 struct ice_aqc_sff_eeprom *cmd; 2614 struct ice_aq_desc desc; 2615 enum ice_status status; 2616 2617 if (!data || (mem_addr & 0xff00)) 2618 return ICE_ERR_PARAM; 2619 2620 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 2621 cmd = &desc.params.read_write_sff_param; 2622 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF); 2623 cmd->lport_num = (u8)(lport & 0xff); 2624 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 2625 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & 2626 ICE_AQC_SFF_I2CBUS_7BIT_M) | 2627 ((set_page << 2628 ICE_AQC_SFF_SET_EEPROM_PAGE_S) & 2629 ICE_AQC_SFF_SET_EEPROM_PAGE_M)); 2630 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 2631 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); 2632 if (write) 2633 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE); 2634 2635 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 2636 return status; 2637 } 2638 2639 /** 2640 * __ice_aq_get_set_rss_lut 2641 * @hw: pointer to the hardware structure 2642 * @vsi_id: VSI FW index 2643 * @lut_type: LUT table type 2644 * @lut: pointer to the LUT buffer provided by the caller 2645 * @lut_size: size of the LUT buffer 2646 * @glob_lut_idx: global LUT index 2647 * @set: set true to set the table, false to get the table 2648 * 2649 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 2650 */ 2651 static enum ice_status 2652 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, 2653 u16 lut_size, u8 glob_lut_idx, bool set) 2654 { 2655 struct ice_aqc_get_set_rss_lut *cmd_resp; 2656 struct ice_aq_desc desc; 2657 enum ice_status status; 2658 u16 flags = 0; 2659 2660 cmd_resp = &desc.params.get_set_rss_lut; 2661 2662 if (set) { 2663 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); 2664 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2665 } else { 2666 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); 2667 } 2668 2669 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 2670 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & 2671 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | 2672 ICE_AQC_GSET_RSS_LUT_VSI_VALID); 2673 2674 switch (lut_type) { 2675 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: 2676 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: 2677 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: 2678 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & 2679 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); 2680 break; 2681 default: 2682 status = ICE_ERR_PARAM; 2683 goto 
ice_aq_get_set_rss_lut_exit; 2684 } 2685 2686 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { 2687 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & 2688 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); 2689 2690 if (!set) 2691 goto ice_aq_get_set_rss_lut_send; 2692 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 2693 if (!set) 2694 goto ice_aq_get_set_rss_lut_send; 2695 } else { 2696 goto ice_aq_get_set_rss_lut_send; 2697 } 2698 2699 /* LUT size is only valid for Global and PF table types */ 2700 switch (lut_size) { 2701 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: 2702 break; 2703 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: 2704 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << 2705 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 2706 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 2707 break; 2708 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: 2709 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 2710 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << 2711 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 2712 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 2713 break; 2714 } 2715 /* fall-through */ 2716 default: 2717 status = ICE_ERR_PARAM; 2718 goto ice_aq_get_set_rss_lut_exit; 2719 } 2720 2721 ice_aq_get_set_rss_lut_send: 2722 cmd_resp->flags = cpu_to_le16(flags); 2723 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 2724 2725 ice_aq_get_set_rss_lut_exit: 2726 return status; 2727 } 2728 2729 /** 2730 * ice_aq_get_rss_lut 2731 * @hw: pointer to the hardware structure 2732 * @vsi_handle: software VSI handle 2733 * @lut_type: LUT table type 2734 * @lut: pointer to the LUT buffer provided by the caller 2735 * @lut_size: size of the LUT buffer 2736 * 2737 * get the RSS lookup table, PF or VSI type 2738 */ 2739 enum ice_status 2740 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, 2741 u8 *lut, u16 lut_size) 2742 { 2743 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 2744 return ICE_ERR_PARAM; 2745 2746 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2747 lut_type, lut, lut_size, 0, false); 2748 } 2749 2750 /** 2751 * ice_aq_set_rss_lut 2752 * @hw: pointer to the hardware structure 2753 * @vsi_handle: software VSI handle 2754 * @lut_type: LUT table type 2755 * @lut: pointer to the LUT buffer provided by the caller 2756 * @lut_size: size of the LUT buffer 2757 * 2758 * set the RSS lookup table, PF or VSI type 2759 */ 2760 enum ice_status 2761 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, 2762 u8 *lut, u16 lut_size) 2763 { 2764 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 2765 return ICE_ERR_PARAM; 2766 2767 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2768 lut_type, lut, lut_size, 0, true); 2769 } 2770 2771 /** 2772 * __ice_aq_get_set_rss_key 2773 * @hw: pointer to the HW struct 2774 * @vsi_id: VSI FW index 2775 * @key: pointer to key info struct 2776 * @set: set true to set the key, false to get the key 2777 * 2778 * get (0x0B04) or set (0x0B02) the RSS key per VSI 2779 */ 2780 static enum 2781 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 2782 struct ice_aqc_get_set_rss_keys *key, 2783 bool set) 2784 { 2785 struct ice_aqc_get_set_rss_key *cmd_resp; 2786 u16 key_size = sizeof(*key); 2787 struct ice_aq_desc desc; 2788 2789 cmd_resp = &desc.params.get_set_rss_key; 2790 2791 if (set) { 2792 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 2793 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2794 } else { 2795 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 2796 } 2797 2798 
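	/* Encode the VSI number into the descriptor and mark it valid; the
	 * same vsi_id layout is used by both the get (0x0B04) and set (0x0B02)
	 * forms of the command.
	 */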
cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 2799 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & 2800 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | 2801 ICE_AQC_GSET_RSS_KEY_VSI_VALID); 2802 2803 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 2804 } 2805 2806 /** 2807 * ice_aq_get_rss_key 2808 * @hw: pointer to the HW struct 2809 * @vsi_handle: software VSI handle 2810 * @key: pointer to key info struct 2811 * 2812 * get the RSS key per VSI 2813 */ 2814 enum ice_status 2815 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 2816 struct ice_aqc_get_set_rss_keys *key) 2817 { 2818 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 2819 return ICE_ERR_PARAM; 2820 2821 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2822 key, false); 2823 } 2824 2825 /** 2826 * ice_aq_set_rss_key 2827 * @hw: pointer to the HW struct 2828 * @vsi_handle: software VSI handle 2829 * @keys: pointer to key info struct 2830 * 2831 * set the RSS key per VSI 2832 */ 2833 enum ice_status 2834 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 2835 struct ice_aqc_get_set_rss_keys *keys) 2836 { 2837 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 2838 return ICE_ERR_PARAM; 2839 2840 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2841 keys, true); 2842 } 2843 2844 /** 2845 * ice_aq_add_lan_txq 2846 * @hw: pointer to the hardware structure 2847 * @num_qgrps: Number of added queue groups 2848 * @qg_list: list of queue groups to be added 2849 * @buf_size: size of buffer for indirect command 2850 * @cd: pointer to command details structure or NULL 2851 * 2852 * Add Tx LAN queue (0x0C30) 2853 * 2854 * NOTE: 2855 * Prior to calling add Tx LAN queue: 2856 * Initialize the following as part of the Tx queue context: 2857 * Completion queue ID if the queue uses Completion queue, Quanta profile, 2858 * Cache profile and Packet shaper profile. 2859 * 2860 * After add Tx LAN queue AQ command is completed: 2861 * Interrupts should be associated with specific queues, 2862 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 2863 * flow. 
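 *
 * Buffer sizing (illustrative, single group with a single queue): buf_size
 * must equal the group header plus the per-queue entries, i.e.
 *
 *	sizeof(struct ice_aqc_add_tx_qgrp) -
 *		sizeof(struct ice_aqc_add_txqs_perq) +
 *		1 * sizeof(struct ice_aqc_add_txqs_perq)
 *
 * which is exactly the sum_header_size/sum_q_size check performed below.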
2864 */ 2865 static enum ice_status 2866 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 2867 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 2868 struct ice_sq_cd *cd) 2869 { 2870 u16 i, sum_header_size, sum_q_size = 0; 2871 struct ice_aqc_add_tx_qgrp *list; 2872 struct ice_aqc_add_txqs *cmd; 2873 struct ice_aq_desc desc; 2874 2875 cmd = &desc.params.add_txqs; 2876 2877 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 2878 2879 if (!qg_list) 2880 return ICE_ERR_PARAM; 2881 2882 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 2883 return ICE_ERR_PARAM; 2884 2885 sum_header_size = num_qgrps * 2886 (sizeof(*qg_list) - sizeof(*qg_list->txqs)); 2887 2888 list = qg_list; 2889 for (i = 0; i < num_qgrps; i++) { 2890 struct ice_aqc_add_txqs_perq *q = list->txqs; 2891 2892 sum_q_size += list->num_txqs * sizeof(*q); 2893 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs); 2894 } 2895 2896 if (buf_size != (sum_header_size + sum_q_size)) 2897 return ICE_ERR_PARAM; 2898 2899 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2900 2901 cmd->num_qgrps = num_qgrps; 2902 2903 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 2904 } 2905 2906 /** 2907 * ice_aq_dis_lan_txq 2908 * @hw: pointer to the hardware structure 2909 * @num_qgrps: number of groups in the list 2910 * @qg_list: the list of groups to disable 2911 * @buf_size: the total size of the qg_list buffer in bytes 2912 * @rst_src: if called due to reset, specifies the reset source 2913 * @vmvf_num: the relative VM or VF number that is undergoing the reset 2914 * @cd: pointer to command details structure or NULL 2915 * 2916 * Disable LAN Tx queue (0x0C31) 2917 */ 2918 static enum ice_status 2919 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 2920 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 2921 enum ice_disq_rst_src rst_src, u16 vmvf_num, 2922 struct ice_sq_cd *cd) 2923 { 2924 struct ice_aqc_dis_txqs *cmd; 2925 struct ice_aq_desc desc; 2926 enum ice_status status; 2927 u16 i, sz = 0; 2928 2929 cmd = &desc.params.dis_txqs; 2930 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 2931 2932 /* qg_list can be NULL only in VM/VF reset flow */ 2933 if (!qg_list && !rst_src) 2934 return ICE_ERR_PARAM; 2935 2936 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 2937 return ICE_ERR_PARAM; 2938 2939 cmd->num_entries = num_qgrps; 2940 2941 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & 2942 ICE_AQC_Q_DIS_TIMEOUT_M); 2943 2944 switch (rst_src) { 2945 case ICE_VM_RESET: 2946 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 2947 cmd->vmvf_and_timeout |= 2948 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); 2949 break; 2950 case ICE_VF_RESET: 2951 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 2952 /* In this case, FW expects vmvf_num to be absolute VF ID */ 2953 cmd->vmvf_and_timeout |= 2954 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & 2955 ICE_AQC_Q_DIS_VMVF_NUM_M); 2956 break; 2957 case ICE_NO_RESET: 2958 default: 2959 break; 2960 } 2961 2962 /* flush pipe on time out */ 2963 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 2964 /* If no queue group info, we are in a reset flow. 
Issue the AQ */ 2965 if (!qg_list) 2966 goto do_aq; 2967 2968 /* set RD bit to indicate that command buffer is provided by the driver 2969 * and it needs to be read by the firmware 2970 */ 2971 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2972 2973 for (i = 0; i < num_qgrps; ++i) { 2974 /* Calculate the size taken up by the queue IDs in this group */ 2975 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id); 2976 2977 /* Add the size of the group header */ 2978 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id); 2979 2980 /* If the num of queues is even, add 2 bytes of padding */ 2981 if ((qg_list[i].num_qs % 2) == 0) 2982 sz += 2; 2983 } 2984 2985 if (buf_size != sz) 2986 return ICE_ERR_PARAM; 2987 2988 do_aq: 2989 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 2990 if (status) { 2991 if (!qg_list) 2992 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 2993 vmvf_num, hw->adminq.sq_last_status); 2994 else 2995 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 2996 le16_to_cpu(qg_list[0].q_id[0]), 2997 hw->adminq.sq_last_status); 2998 } 2999 return status; 3000 } 3001 3002 /* End of FW Admin Queue command wrappers */ 3003 3004 /** 3005 * ice_write_byte - write a byte to a packed context structure 3006 * @src_ctx: the context structure to read from 3007 * @dest_ctx: the context to be written to 3008 * @ce_info: a description of the struct to be filled 3009 */ 3010 static void 3011 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 3012 { 3013 u8 src_byte, dest_byte, mask; 3014 u8 *from, *dest; 3015 u16 shift_width; 3016 3017 /* copy from the next struct field */ 3018 from = src_ctx + ce_info->offset; 3019 3020 /* prepare the bits and mask */ 3021 shift_width = ce_info->lsb % 8; 3022 mask = (u8)(BIT(ce_info->width) - 1); 3023 3024 src_byte = *from; 3025 src_byte &= mask; 3026 3027 /* shift to correct alignment */ 3028 mask <<= shift_width; 3029 src_byte <<= shift_width; 3030 3031 /* get the current bits from the target bit string */ 3032 dest = dest_ctx + (ce_info->lsb / 8); 3033 3034 memcpy(&dest_byte, dest, sizeof(dest_byte)); 3035 3036 dest_byte &= ~mask; /* get the bits not changing */ 3037 dest_byte |= src_byte; /* add in the new bits */ 3038 3039 /* put it all back */ 3040 memcpy(dest, &dest_byte, sizeof(dest_byte)); 3041 } 3042 3043 /** 3044 * ice_write_word - write a word to a packed context structure 3045 * @src_ctx: the context structure to read from 3046 * @dest_ctx: the context to be written to 3047 * @ce_info: a description of the struct to be filled 3048 */ 3049 static void 3050 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 3051 { 3052 u16 src_word, mask; 3053 __le16 dest_word; 3054 u8 *from, *dest; 3055 u16 shift_width; 3056 3057 /* copy from the next struct field */ 3058 from = src_ctx + ce_info->offset; 3059 3060 /* prepare the bits and mask */ 3061 shift_width = ce_info->lsb % 8; 3062 mask = BIT(ce_info->width) - 1; 3063 3064 /* don't swizzle the bits until after the mask because the mask bits 3065 * will be in a different bit position on big endian machines 3066 */ 3067 src_word = *(u16 *)from; 3068 src_word &= mask; 3069 3070 /* shift to correct alignment */ 3071 mask <<= shift_width; 3072 src_word <<= shift_width; 3073 3074 /* get the current bits from the target bit string */ 3075 dest = dest_ctx + (ce_info->lsb / 8); 3076 3077 memcpy(&dest_word, dest, sizeof(dest_word)); 3078 3079 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 3080 dest_word |= cpu_to_le16(src_word); 
/* add in the new bits */ 3081 3082 /* put it all back */ 3083 memcpy(dest, &dest_word, sizeof(dest_word)); 3084 } 3085 3086 /** 3087 * ice_write_dword - write a dword to a packed context structure 3088 * @src_ctx: the context structure to read from 3089 * @dest_ctx: the context to be written to 3090 * @ce_info: a description of the struct to be filled 3091 */ 3092 static void 3093 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 3094 { 3095 u32 src_dword, mask; 3096 __le32 dest_dword; 3097 u8 *from, *dest; 3098 u16 shift_width; 3099 3100 /* copy from the next struct field */ 3101 from = src_ctx + ce_info->offset; 3102 3103 /* prepare the bits and mask */ 3104 shift_width = ce_info->lsb % 8; 3105 3106 /* if the field width is exactly 32 on an x86 machine, then the shift 3107 * operation will not work because the SHL instructions count is masked 3108 * to 5 bits so the shift will do nothing 3109 */ 3110 if (ce_info->width < 32) 3111 mask = BIT(ce_info->width) - 1; 3112 else 3113 mask = (u32)~0; 3114 3115 /* don't swizzle the bits until after the mask because the mask bits 3116 * will be in a different bit position on big endian machines 3117 */ 3118 src_dword = *(u32 *)from; 3119 src_dword &= mask; 3120 3121 /* shift to correct alignment */ 3122 mask <<= shift_width; 3123 src_dword <<= shift_width; 3124 3125 /* get the current bits from the target bit string */ 3126 dest = dest_ctx + (ce_info->lsb / 8); 3127 3128 memcpy(&dest_dword, dest, sizeof(dest_dword)); 3129 3130 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 3131 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 3132 3133 /* put it all back */ 3134 memcpy(dest, &dest_dword, sizeof(dest_dword)); 3135 } 3136 3137 /** 3138 * ice_write_qword - write a qword to a packed context structure 3139 * @src_ctx: the context structure to read from 3140 * @dest_ctx: the context to be written to 3141 * @ce_info: a description of the struct to be filled 3142 */ 3143 static void 3144 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 3145 { 3146 u64 src_qword, mask; 3147 __le64 dest_qword; 3148 u8 *from, *dest; 3149 u16 shift_width; 3150 3151 /* copy from the next struct field */ 3152 from = src_ctx + ce_info->offset; 3153 3154 /* prepare the bits and mask */ 3155 shift_width = ce_info->lsb % 8; 3156 3157 /* if the field width is exactly 64 on an x86 machine, then the shift 3158 * operation will not work because the SHL instructions count is masked 3159 * to 6 bits so the shift will do nothing 3160 */ 3161 if (ce_info->width < 64) 3162 mask = BIT_ULL(ce_info->width) - 1; 3163 else 3164 mask = (u64)~0; 3165 3166 /* don't swizzle the bits until after the mask because the mask bits 3167 * will be in a different bit position on big endian machines 3168 */ 3169 src_qword = *(u64 *)from; 3170 src_qword &= mask; 3171 3172 /* shift to correct alignment */ 3173 mask <<= shift_width; 3174 src_qword <<= shift_width; 3175 3176 /* get the current bits from the target bit string */ 3177 dest = dest_ctx + (ce_info->lsb / 8); 3178 3179 memcpy(&dest_qword, dest, sizeof(dest_qword)); 3180 3181 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 3182 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 3183 3184 /* put it all back */ 3185 memcpy(dest, &dest_qword, sizeof(dest_qword)); 3186 } 3187 3188 /** 3189 * ice_set_ctx - set context bits in packed structure 3190 * @src_ctx: pointer to a generic non-packed context structure 3191 * @dest_ctx: pointer 
to memory for the packed structure 3192 * @ce_info: a description of the structure to be transformed 3193 */ 3194 enum ice_status 3195 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 3196 { 3197 int f; 3198 3199 for (f = 0; ce_info[f].width; f++) { 3200 /* We have to deal with each element of the FW response 3201 * using the correct size so that we are correct regardless 3202 * of the endianness of the machine. 3203 */ 3204 switch (ce_info[f].size_of) { 3205 case sizeof(u8): 3206 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 3207 break; 3208 case sizeof(u16): 3209 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 3210 break; 3211 case sizeof(u32): 3212 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 3213 break; 3214 case sizeof(u64): 3215 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 3216 break; 3217 default: 3218 return ICE_ERR_INVAL_SIZE; 3219 } 3220 } 3221 3222 return 0; 3223 } 3224 3225 /** 3226 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 3227 * @hw: pointer to the HW struct 3228 * @vsi_handle: software VSI handle 3229 * @tc: TC number 3230 * @q_handle: software queue handle 3231 */ 3232 struct ice_q_ctx * 3233 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 3234 { 3235 struct ice_vsi_ctx *vsi; 3236 struct ice_q_ctx *q_ctx; 3237 3238 vsi = ice_get_vsi_ctx(hw, vsi_handle); 3239 if (!vsi) 3240 return NULL; 3241 if (q_handle >= vsi->num_lan_q_entries[tc]) 3242 return NULL; 3243 if (!vsi->lan_q_ctx[tc]) 3244 return NULL; 3245 q_ctx = vsi->lan_q_ctx[tc]; 3246 return &q_ctx[q_handle]; 3247 } 3248 3249 /** 3250 * ice_ena_vsi_txq 3251 * @pi: port information structure 3252 * @vsi_handle: software VSI handle 3253 * @tc: TC number 3254 * @q_handle: software queue handle 3255 * @num_qgrps: Number of added queue groups 3256 * @buf: list of queue groups to be added 3257 * @buf_size: size of buffer for indirect command 3258 * @cd: pointer to command details structure or NULL 3259 * 3260 * This function adds one LAN queue 3261 */ 3262 enum ice_status 3263 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 3264 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 3265 struct ice_sq_cd *cd) 3266 { 3267 struct ice_aqc_txsched_elem_data node = { 0 }; 3268 struct ice_sched_node *parent; 3269 struct ice_q_ctx *q_ctx; 3270 enum ice_status status; 3271 struct ice_hw *hw; 3272 3273 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 3274 return ICE_ERR_CFG; 3275 3276 if (num_qgrps > 1 || buf->num_txqs > 1) 3277 return ICE_ERR_MAX_LIMIT; 3278 3279 hw = pi->hw; 3280 3281 if (!ice_is_vsi_valid(hw, vsi_handle)) 3282 return ICE_ERR_PARAM; 3283 3284 mutex_lock(&pi->sched_lock); 3285 3286 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 3287 if (!q_ctx) { 3288 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 3289 q_handle); 3290 status = ICE_ERR_PARAM; 3291 goto ena_txq_exit; 3292 } 3293 3294 /* find a parent node */ 3295 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 3296 ICE_SCHED_NODE_OWNER_LAN); 3297 if (!parent) { 3298 status = ICE_ERR_PARAM; 3299 goto ena_txq_exit; 3300 } 3301 3302 buf->parent_teid = parent->info.node_teid; 3303 node.parent_teid = parent->info.node_teid; 3304 /* Mark that the values in the "generic" section as valid. The default 3305 * value in the "generic" section is zero. This means that : 3306 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 3307 * - 0 priority among siblings, indicated by Bit 1-3. 
3308 * - WFQ, indicated by Bit 4. 3309 * - 0 Adjustment value is used in PSM credit update flow, indicated by 3310 * Bit 5-6. 3311 * - Bit 7 is reserved. 3312 * Without setting the generic section as valid in valid_sections, the 3313 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 3314 */ 3315 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC; 3316 3317 /* add the LAN queue */ 3318 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 3319 if (status) { 3320 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", 3321 le16_to_cpu(buf->txqs[0].txq_id), 3322 hw->adminq.sq_last_status); 3323 goto ena_txq_exit; 3324 } 3325 3326 node.node_teid = buf->txqs[0].q_teid; 3327 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 3328 q_ctx->q_handle = q_handle; 3329 q_ctx->q_teid = le32_to_cpu(node.node_teid); 3330 3331 /* add a leaf node into scheduler tree queue layer */ 3332 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); 3333 if (!status) 3334 status = ice_sched_replay_q_bw(pi, q_ctx); 3335 3336 ena_txq_exit: 3337 mutex_unlock(&pi->sched_lock); 3338 return status; 3339 } 3340 3341 /** 3342 * ice_dis_vsi_txq 3343 * @pi: port information structure 3344 * @vsi_handle: software VSI handle 3345 * @tc: TC number 3346 * @num_queues: number of queues 3347 * @q_handles: pointer to software queue handle array 3348 * @q_ids: pointer to the q_id array 3349 * @q_teids: pointer to queue node teids 3350 * @rst_src: if called due to reset, specifies the reset source 3351 * @vmvf_num: the relative VM or VF number that is undergoing the reset 3352 * @cd: pointer to command details structure or NULL 3353 * 3354 * This function removes queues and their corresponding nodes in SW DB 3355 */ 3356 enum ice_status 3357 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 3358 u16 *q_handles, u16 *q_ids, u32 *q_teids, 3359 enum ice_disq_rst_src rst_src, u16 vmvf_num, 3360 struct ice_sq_cd *cd) 3361 { 3362 enum ice_status status = ICE_ERR_DOES_NOT_EXIST; 3363 struct ice_aqc_dis_txq_item qg_list; 3364 struct ice_q_ctx *q_ctx; 3365 u16 i; 3366 3367 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 3368 return ICE_ERR_CFG; 3369 3370 if (!num_queues) { 3371 /* if queue is disabled already yet the disable queue command 3372 * has to be sent to complete the VF reset, then call 3373 * ice_aq_dis_lan_txq without any queue information 3374 */ 3375 if (rst_src) 3376 return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, 3377 vmvf_num, NULL); 3378 return ICE_ERR_CFG; 3379 } 3380 3381 mutex_lock(&pi->sched_lock); 3382 3383 for (i = 0; i < num_queues; i++) { 3384 struct ice_sched_node *node; 3385 3386 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 3387 if (!node) 3388 continue; 3389 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]); 3390 if (!q_ctx) { 3391 ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n", 3392 q_handles[i]); 3393 continue; 3394 } 3395 if (q_ctx->q_handle != q_handles[i]) { 3396 ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n", 3397 q_ctx->q_handle, q_handles[i]); 3398 continue; 3399 } 3400 qg_list.parent_teid = node->info.parent_teid; 3401 qg_list.num_qs = 1; 3402 qg_list.q_id[0] = cpu_to_le16(q_ids[i]); 3403 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, 3404 sizeof(qg_list), rst_src, vmvf_num, 3405 cd); 3406 3407 if (status) 3408 break; 3409 ice_free_sched_node(pi, node); 3410 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 3411 } 3412 mutex_unlock(&pi->sched_lock); 3413 return status; 3414 } 
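/* Usage sketch (illustrative only, not part of the driver): a caller that
 * brought a queue up with ice_ena_vsi_txq() tears it down by handing the
 * same software queue handle, queue ID and TEID back to ice_dis_vsi_txq().
 * The txq structure and its fields below are assumptions for illustration.
 *
 *	u16 q_handle = txq->q_handle;
 *	u16 q_id = txq->reg_idx;
 *	u32 q_teid = txq->q_teid;
 *
 *	status = ice_dis_vsi_txq(pi, vsi_handle, tc, 1, &q_handle, &q_id,
 *				 &q_teid, ICE_NO_RESET, 0, NULL);
 */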
3415 3416 /** 3417 * ice_cfg_vsi_qs - configure the new/existing VSI queues 3418 * @pi: port information structure 3419 * @vsi_handle: software VSI handle 3420 * @tc_bitmap: TC bitmap 3421 * @maxqs: max queues array per TC 3422 * @owner: LAN or RDMA 3423 * 3424 * This function adds/updates the VSI queues per TC. 3425 */ 3426 static enum ice_status 3427 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 3428 u16 *maxqs, u8 owner) 3429 { 3430 enum ice_status status = 0; 3431 u8 i; 3432 3433 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 3434 return ICE_ERR_CFG; 3435 3436 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 3437 return ICE_ERR_PARAM; 3438 3439 mutex_lock(&pi->sched_lock); 3440 3441 ice_for_each_traffic_class(i) { 3442 /* configuration is possible only if TC node is present */ 3443 if (!ice_sched_get_tc_node(pi, i)) 3444 continue; 3445 3446 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 3447 ice_is_tc_ena(tc_bitmap, i)); 3448 if (status) 3449 break; 3450 } 3451 3452 mutex_unlock(&pi->sched_lock); 3453 return status; 3454 } 3455 3456 /** 3457 * ice_cfg_vsi_lan - configure VSI LAN queues 3458 * @pi: port information structure 3459 * @vsi_handle: software VSI handle 3460 * @tc_bitmap: TC bitmap 3461 * @max_lanqs: max LAN queues array per TC 3462 * 3463 * This function adds/updates the VSI LAN queues per TC. 3464 */ 3465 enum ice_status 3466 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 3467 u16 *max_lanqs) 3468 { 3469 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 3470 ICE_SCHED_NODE_OWNER_LAN); 3471 } 3472 3473 /** 3474 * ice_replay_pre_init - replay pre initialization 3475 * @hw: pointer to the HW struct 3476 * 3477 * Initializes required config data for VSI, FD, ACL, and RSS before replay. 3478 */ 3479 static enum ice_status ice_replay_pre_init(struct ice_hw *hw) 3480 { 3481 struct ice_switch_info *sw = hw->switch_info; 3482 u8 i; 3483 3484 /* Delete old entries from replay filter list head if there is any */ 3485 ice_rm_all_sw_replay_rule_info(hw); 3486 /* In start of replay, move entries into replay_rules list, it 3487 * will allow adding rules entries back to filt_rules list, 3488 * which is operational list. 3489 */ 3490 for (i = 0; i < ICE_SW_LKUP_LAST; i++) 3491 list_replace_init(&sw->recp_list[i].filt_rules, 3492 &sw->recp_list[i].filt_replay_rules); 3493 3494 return 0; 3495 } 3496 3497 /** 3498 * ice_replay_vsi - replay VSI configuration 3499 * @hw: pointer to the HW struct 3500 * @vsi_handle: driver VSI handle 3501 * 3502 * Restore all VSI configuration after reset. It is required to call this 3503 * function with main VSI first. 3504 */ 3505 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) 3506 { 3507 enum ice_status status; 3508 3509 if (!ice_is_vsi_valid(hw, vsi_handle)) 3510 return ICE_ERR_PARAM; 3511 3512 /* Replay pre-initialization if there is any */ 3513 if (vsi_handle == ICE_MAIN_VSI_HANDLE) { 3514 status = ice_replay_pre_init(hw); 3515 if (status) 3516 return status; 3517 } 3518 /* Replay per VSI all RSS configurations */ 3519 status = ice_replay_rss_cfg(hw, vsi_handle); 3520 if (status) 3521 return status; 3522 /* Replay per VSI all filters */ 3523 status = ice_replay_vsi_all_fltr(hw, vsi_handle); 3524 return status; 3525 } 3526 3527 /** 3528 * ice_replay_post - post replay configuration cleanup 3529 * @hw: pointer to the HW struct 3530 * 3531 * Post replay cleanup. 
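 *
 * Typical post-reset sequence (illustrative):
 *
 *	ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	... replay the remaining VSIs ...
 *	ice_replay_post(hw);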

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from the replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR; they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
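
/**
 * ice_stat_delta40_sketch - worked example of the 40 bit roll-over math
 * @prev_stat: previously read 40 bit counter value
 * @new_data: newly read 40 bit counter value
 *
 * Hedged, standalone sketch of the wrap-around handling used by
 * ice_stat_update40, on explicit inputs instead of a register read. For
 * example, prev_stat = 0xFFFFFFFFF0 and new_data = 0x10 gives a delta of
 * 0x20: the counter wrapped past 2^40 and advanced 0x20 counts overall.
 */
static u64 __maybe_unused ice_stat_delta40_sketch(u64 prev_stat, u64 new_data)
{
	/* both values are assumed to already be masked to 40 bits */
	if (new_data >= prev_stat)
		return new_data - prev_stat;

	/* roll-over: add one full 40 bit period before subtracting */
	return (new_data + BIT_ULL(40)) - prev_stat;
}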

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR; they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to hold the element information
 *
 * This function queries HW element information.
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->generic[0].node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}
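
/**
 * ice_query_q_node_sketch - illustrative use of ice_sched_query_elem
 * @hw: pointer to the HW struct
 * @q_teid: TEID of a queue node, e.g. the q_teid saved by ice_ena_vsi_txq
 *
 * Hedged sketch (not part of the driver flow) of reading back the scheduler
 * element for a single queue node. A one-element ice_aqc_get_elem buffer on
 * the stack matches what ice_sched_query_elem expects; the caller name and
 * the idea of querying right after enabling a queue are assumptions here.
 */
static enum ice_status __maybe_unused
ice_query_q_node_sketch(struct ice_hw *hw, u32 q_teid)
{
	struct ice_aqc_get_elem buf;

	/* ice_sched_query_elem zeroes the buffer before use */
	return ice_sched_query_elem(hw, q_teid, &buf);
}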