// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#define ICE_PF_RESET_WAIT_COUNT	200

#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
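/* Illustration only (not part of the driver logic): a call such as
 *
 *	ICE_PROG_FLEX_ENTRY(hw, ICE_RXDID_FLEX_NIC, ICE_RX_MDID_HASH_LOW, 0);
 *
 * expands to a single register write programming flex word 0 of the
 * ICE_RXDID_FLEX_NIC profile, roughly:
 *
 *	wr32(hw, GLFLXP_RXDID_FLX_WRD_0(ICE_RXDID_FLEX_NIC),
 *	     ((ICE_RX_OPC_MDID << opcode_shift) & opcode_mask) |
 *	     ((ICE_RX_MDID_HASH_LOW << mdid_shift) & mdid_mask));
 *
 * where opcode_shift/opcode_mask and mdid_shift/mdid_mask stand in for the
 * token-pasted GLFLXP_RXDID_FLX_WRD_0_* shift/mask names.
 */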
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;
	return 0;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response.
 * The returned MAC addresses are stored in the HW struct (port.mac).
 * ice_aq_discover_caps is expected to be called before this function.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);

	return status;
}
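/* Illustrative caller sketch for ice_aq_get_phy_caps() (mirrors the pattern
 * used by ice_init_hw() later in this file; shown only as an example). The
 * response structure is heap-allocated because it is fairly large:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	enum ice_status status;
 *
 *	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_get_phy_caps(hw->port_info, false,
 *				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
 *	devm_kfree(ice_hw_to_dev(hw), pcaps);
 */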
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
			return ICE_MEDIA_BACKPLANE;
		}
	}

	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);

	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}
/**
 * ice_init_flex_flags
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize Rx flex flags
 */
static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	u8 idx = 0;

	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
	 * flexiflags1[3:0] - Not used for flag programming
	 * flexiflags2[7:0] - Tunnel and VLAN types
	 * 2 invalid fields in last index
	 */
	switch (prof_id) {
	/* Rx flex flags are currently programmed for the NIC profiles only.
	 * Different flag bit programming configurations can be added per
	 * profile as needed.
	 */
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_FIN, idx++);
		/* flex flag 1 is not used for flexi-flag programming, skipping
		 * these four FLG64 bits.
		 */
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
				   ICE_RXFLG_EVLAN_x9100, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
				   ICE_RXFLG_TNL0, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Flag programming for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_flex_flds
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize flex descriptors
 */
static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	enum ice_flex_rx_mdid mdid;

	switch (prof_id) {
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;

		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);

		ice_init_flex_flags(hw, prof_id);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Field init for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the hw struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	ice_init_def_sw_recp(hw);

	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the hw struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

#define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
#define ICE_FW_LOG_DESC_SIZE_MAX	\
	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
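/* Worked size example for the macros above (illustrative): the structure
 * already contains room for one entry, so for n == 3 modules
 *
 *	ICE_FW_LOG_DESC_SIZE(3) == sizeof(struct ice_aqc_fw_logging_data) +
 *		2 * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)
 *
 * and ICE_FW_LOG_DESC_SIZE_MAX is simply the same formula evaluated for all
 * ICE_AQC_FW_LOG_ID_MAX modules.
 */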
/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the hw struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging_data *data = NULL;
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kzalloc(ice_hw_to_dev(hw),
						    ICE_FW_LOG_DESC_SIZE_MAX,
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data->entry[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = ICE_FW_LOG_DESC_SIZE(chgs);
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
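/* Illustrative configuration sketch (not a fixed driver flow): a caller that
 * wants FW log messages for, say, module 0 delivered via the Rx CQ would set
 * the enable bit and the per-module cfg before calling ice_cfg_fw_log():
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[0].cfg = ICE_AQC_FW_LOG_INFO_EN;  // assumed event bit
 *	status = ice_cfg_fw_log(hw, true);
 *
 * ICE_AQC_FW_LOG_INFO_EN stands in for whichever event-severity bits the
 * adminq definitions provide. On teardown, ice_cfg_fw_log(hw, false)
 * disables everything while preserving the cfg values for a later re-init.
 */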
/**
 * ice_output_fw_log
 * @hw: pointer to the hw struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran - determine int/intrl granularity
 * @hw: pointer to the hw struct
 *
 * Determines the itr/intrl granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to determine itr/intrl granularity\n");
		return ICE_ERR_CFG;
	}

	return 0;
}
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	status = ice_get_itr_intrl_gran(hw);
	if (status)
		return status;

	status = ice_init_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to hw */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_shutdown_all_ctrlq(hw);
	return status;
}
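/* Minimal caller sketch (hypothetical probe path, for illustration only):
 * ice_init_hw() and ice_deinit_hw() below are expected to be paired.
 *
 *	static int example_probe(struct ice_hw *hw)	// hypothetical helper
 *	{
 *		if (ice_init_hw(hw))
 *			return -EIO;
 *		// ... device is usable; on remove, undo with:
 *		// ice_deinit_hw(hw);
 *		return 0;
 *	}
 */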
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_shutdown_all_ctrlq(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
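/* Worked example of the ice_check_reset() polling budget (illustrative):
 * if the GLGEN_RSTCTL GRSTDEL field reads 5 (i.e. 500 ms of reset delay),
 * grst_delay = 5 + 10 = 15 iterations of mdelay(100), so the device has up
 * to 1.5 s to report Device Active; the extra 10 units are the 1 s margin
 * for outstanding AQ commands mentioned in the comment there.
 */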
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Copies rxq context from dense structure to hw register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to hw */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to hw register space
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
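/* Illustrative use of the context writer (field values are made up):
 * fill the sparse struct and let ice_set_ctx()/ice_write_rxq_ctx() pack it
 * into the dense QRX_CONTEXT image described by ice_rlan_ctx_info, e.g.
 * qlen lands in the 13-bit field starting at bit 89 of that image.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;	// assumes 128-byte base granularity
 *	rlan_ctx.qlen = 512;		// descriptors in the ring
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */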
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
	{ 0 }
};

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
		  void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}
/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the hw struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_req_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS - acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *    successfully downloaded the package; the driver does
 *    not have to download the package and can continue
 *    loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
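/* Illustrative acquire/release pairing (the resource ID and timeout here
 * are assumptions for the example, not fixed driver policy):
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
 *	if (status)
 *		return status;
 *	// ... access the shared resource ...
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */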
/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource id
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_SRIOV:
			caps->sr_iov_1_1 = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
			break;
		case ICE_AQC_CAPS_VF:
			if (dev_p) {
				dev_p->num_vfs_exposed = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: VFs exposed = %d\n",
					  dev_p->num_vfs_exposed);
			} else if (func_p) {
				func_p->num_allocd_vfs = number;
				func_p->vf_base_id = logical_id;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: VFs allocated = %d\n",
					  func_p->num_allocd_vfs);
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: VF base_id = %d\n",
					  func_p->vf_base_id);
			}
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guaranteed_num_vsi = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  func_p->guaranteed_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}
%d\n", 1491 caps->max_mtu); 1492 break; 1493 default: 1494 ice_debug(hw, ICE_DBG_INIT, 1495 "HW caps: Unknown capability[%d]: 0x%x\n", i, 1496 cap); 1497 break; 1498 } 1499 } 1500 } 1501 1502 /** 1503 * ice_aq_discover_caps - query function/device capabilities 1504 * @hw: pointer to the hw struct 1505 * @buf: a virtual buffer to hold the capabilities 1506 * @buf_size: Size of the virtual buffer 1507 * @cap_count: cap count needed if AQ err==ENOMEM 1508 * @opc: capabilities type to discover - pass in the command opcode 1509 * @cd: pointer to command details structure or NULL 1510 * 1511 * Get the function(0x000a)/device(0x000b) capabilities description from 1512 * the firmware. 1513 */ 1514 static enum ice_status 1515 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 1516 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 1517 { 1518 struct ice_aqc_list_caps *cmd; 1519 struct ice_aq_desc desc; 1520 enum ice_status status; 1521 1522 cmd = &desc.params.get_cap; 1523 1524 if (opc != ice_aqc_opc_list_func_caps && 1525 opc != ice_aqc_opc_list_dev_caps) 1526 return ICE_ERR_PARAM; 1527 1528 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1529 1530 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 1531 if (!status) 1532 ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc); 1533 else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM) 1534 *cap_count = 1535 DIV_ROUND_UP(le16_to_cpu(desc.datalen), 1536 sizeof(struct ice_aqc_list_caps_elem)); 1537 return status; 1538 } 1539 1540 /** 1541 * ice_discover_caps - get info about the HW 1542 * @hw: pointer to the hardware structure 1543 * @opc: capabilities type to discover - pass in the command opcode 1544 */ 1545 static enum ice_status ice_discover_caps(struct ice_hw *hw, 1546 enum ice_adminq_opc opc) 1547 { 1548 enum ice_status status; 1549 u32 cap_count; 1550 u16 cbuf_len; 1551 u8 retries; 1552 1553 /* The driver doesn't know how many capabilities the device will return 1554 * so the buffer size required isn't known ahead of time. The driver 1555 * starts with cbuf_len and if this turns out to be insufficient, the 1556 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs. 1557 * The driver then allocates the buffer based on the count and retries 1558 * the operation. So it follows that the retry count is 2. 
/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the hw struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;

	/* Prep values for flags, sah, sal */
	cmd->sah = htons(*((u16 *)mac_addr));
	cmd->sal = htonl(*((u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
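/* Byte-order worked example for the sah/sal conversion above (illustrative):
 * for a MAC address of 00:11:22:33:44:55, htons()/htonl() produce
 * sah == 0x0011 and sal == 0x22334455, i.e. the station address high/low
 * fields carry the address in big-endian (network) order.
 */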
/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 *
 * This helper function will convert a phy_type_low to its corresponding link
 * speed.
 * Note: In the structure of phy_type_low, there should be exactly one bit
 * set, as this function converts one PHY type to its speed. If no bit is set,
 * or if more than one bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
{
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	return speed_phy_type_low;
}
/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap structure, you can check it at
 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each phy_type_low bit represents a certain link speed. This helper
 * function turns on the bits in phy_type_low whose corresponding speeds
 * are set in the link_speeds_bitmap input parameter.
 */
void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
{
	u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
	u64 pt_low;
	int index;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}
}
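/* Illustrative call (the chosen speeds are examples only): to advertise
 * just 10G and 25G, build the mask from the link_speed bits:
 *
 *	u64 phy_type_low = 0;
 *
 *	ice_update_phy_type(&phy_type_low,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *
 * Afterwards phy_type_low has every ICE_PHY_TYPE_LOW_* bit whose speed maps
 * to 10G or 25G (e.g. ICE_PHY_TYPE_LOW_10GBASE_T, ICE_PHY_TYPE_LOW_25GBASE_CR)
 * per ice_get_link_speed_based_on_phy_type() above.
 */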
/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the old pause settings */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
	/* set the new capabilities */
	cfg.caps |= pause_mask;
	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
		/* Copy over all the old settings */
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
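/* Illustrative only (not part of the driver): a hypothetical caller picks
 * the requested mode, calls ice_set_fc(), and then uses aq_failures to
 * tell which step failed.
 *
 *	u8 aq_failures;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	if (ice_set_fc(pi, &aq_failures, true) &&
 *	    aq_failures == ICE_SET_FC_AQ_FAIL_SET)
 *		dev_dbg(ice_hw_to_dev(pi->hw), "Set PHY config failed\n");
 */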
/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = link up/link down)
 *
 * link_up is set to true if the link is up and false if it is down; its
 * value is invalid if the return status is non-zero. As a result of this
 * call, link status reporting becomes enabled.
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = 0;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK,
				  "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the hw struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
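/* Illustrative only (not part of the driver): a hypothetical caller that
 * only cares about link up/down transitions could mask off every other
 * event; set bits in the mask suppress the corresponding events.
 * ICE_AQ_LINK_EVENT_UPDOWN is assumed to be the relevant bit defined in
 * ice_adminq_cmd.h.
 *
 *	u16 mask = ~((u16)ICE_AQ_LINK_EVENT_UPDOWN);
 *
 *	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL))
 *		dev_dbg(ice_hw_to_dev(pi->hw), "set event mask failed\n");
 */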
/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 * @glob_lut_idx: global LUT index
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall-through */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, true);
}
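/* Illustrative only (not part of the driver): a hypothetical caller
 * distributing num_rxq receive queues round-robin over a VSI-type LUT.
 * The 64-entry size and the num_rxq variable are assumptions for the
 * example.
 *
 *	u8 lut[64];
 *	int i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % num_rxq;
 *
 *	status = ice_aq_set_rss_lut(hw, vsi_handle,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI,
 *				    lut, sizeof(lut));
 */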
/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum ice_status
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}
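/* Illustrative only (not part of the driver): a hypothetical caller
 * seeding the per-VSI hash key with a random key. The standard_rss_key
 * field name is an assumption about struct ice_aqc_get_set_rss_keys.
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *
 *	netdev_rss_key_fill(keys.standard_rss_key,
 *			    sizeof(keys.standard_rss_key));
 *	status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */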
/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	u16 i, sum_header_size, sum_q_size = 0;
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	sum_header_size = num_qgrps *
		(sizeof(*qg_list) - sizeof(*qg_list->txqs));

	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
		struct ice_aqc_add_txqs_perq *q = list->txqs;

		sum_q_size += list->num_txqs * sizeof(*q);
		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
	}

	if (buf_size != (sum_header_size + sum_q_size))
		return ICE_ERR_PARAM;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
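/* Illustrative only (not part of the driver): sizing the indirect buffer
 * for a single queue group carrying a single queue, which is what the
 * size check above expects (error handling elided; assumes the
 * single-element txqs[] layout of struct ice_aqc_add_tx_qgrp).
 *
 *	u16 buf_size = sizeof(struct ice_aqc_add_tx_qgrp);
 *	struct ice_aqc_add_tx_qgrp *qg_buf;
 *
 *	qg_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
 *	qg_buf->num_txqs = 1;
 *	...fill qg_buf->txqs[0] with the queue ID and context...
 *	status = ice_aq_add_lan_txq(hw, 1, qg_buf, buf_size, NULL);
 */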
/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the RST source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF id */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0; i < num_qgrps; ++i) {
		/* Calculate the size taken up by the queue IDs in this group */
		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);

		/* Add the size of the group header */
		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}
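/* Worked example for the helpers above (illustrative): a field described
 * by { .lsb = 11, .width = 3 } lands in byte ce_info->lsb / 8 = 1 of the
 * packed buffer. shift_width = 11 % 8 = 3, so mask = 0x07 becomes 0x38
 * after the shift, and the three source bits are OR'ed into bits 3..5 of
 * that byte while all other bits are preserved.
 */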
/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}
/**
 * ice_set_ctx - set context bits in packed structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}
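/* Illustrative only (not part of the driver): a minimal sketch of how a
 * zero-terminated table of ice_ctx_ele entries drives ice_set_ctx(). The
 * ICE_CTX_STORE initializer, the my_ctx fields and the buffer size below
 * are assumptions for the example.
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *			      // Field	Width	LSB
 *		ICE_CTX_STORE(my_ctx, base,	57,	0),
 *		ICE_CTX_STORE(my_ctx, qlen,	13,	57),
 *		{ 0 }
 *	};
 *	u8 buf[MY_CTX_SZ];	// hypothetical packed buffer size
 *
 *	ice_set_ctx((u8 *)&my_ctx, buf, my_ctx_info);
 */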
/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: tc number
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN Tx queue.
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin Queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status)
		goto ena_txq_exit;

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @num_queues: number of queues
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the RST source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
		u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	/* if queue is disabled already yet the disable queue command has to be
	 * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
	 * any queue information
	 */
	if (!num_queues && rst_src)
		return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
					  NULL);

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), rst_src, vmvf_num,
					    cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}
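/* Illustrative only (not part of the driver): a hypothetical caller
 * tearing down a single Tx queue whose ID and scheduler node TEID were
 * saved when the queue was enabled. The my_* variables are assumptions.
 *
 *	u16 q_id = my_txq_id;		// hypothetical saved queue ID
 *	u32 q_teid = my_txq_teid;	// hypothetical saved node TEID
 *
 *	status = ice_dis_vsi_txq(pi, 1, &q_id, &q_teid, ICE_NO_RESET, 0,
 *				 NULL);
 */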
/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the hw struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move the entries to the replay_rules
	 * list; this allows rule entries to be added back to the
	 * filt_rules list, which is the operational list.
	 */
	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the hw struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}

	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	return status;
}
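/* Illustrative only (not part of the driver): a hypothetical caller
 * configuring a single-TC VSI with max_txq queues on TC 0 (tc_bitmap of
 * 1 enables only TC 0). max_txq and the vsi fields are assumptions.
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *
 *	max_lanqs[0] = max_txq;
 *	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_lanqs);
 */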
/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the hw struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @hireg: high 32 bit HW register to read from
 * @loreg: low 32 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
		       bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data;

	new_data = rd32(hw, loreg);
	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. So save the first values read and use them
	 * as offsets to be subtracted from the raw values in order to report
	 * stats that count from zero.
	 */
	if (!prev_stat_loaded)
		*prev_stat = new_data;
	if (new_data >= *prev_stat)
		*cur_stat = new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
	*cur_stat &= 0xFFFFFFFFFFULL;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		       u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. So save the first values read and use them
	 * as offsets to be subtracted from the raw values in order to report
	 * stats that count from zero.
	 */
	if (!prev_stat_loaded)
		*prev_stat = new_data;
	if (new_data >= *prev_stat)
		*cur_stat = new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
}
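/* Illustrative only (not part of the driver): a hypothetical per-VSI stat
 * refresh using the offset-on-first-read pattern above. The register
 * macros and stat fields are assumptions for the example.
 *
 *	ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
 *			  vsi->stat_offsets_loaded,
 *			  &prev->rx_bytes, &cur->rx_bytes);
 *	vsi->stat_offsets_loaded = true;
 */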