1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 #include "ice_common.h" 5 #include "ice_sched.h" 6 #include "ice_adminq_cmd.h" 7 8 #define ICE_PF_RESET_WAIT_COUNT 200 9 10 #define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \ 11 wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \ 12 ((ICE_RX_OPC_MDID << \ 13 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \ 14 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \ 15 (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \ 16 GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M)) 17 18 #define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \ 19 wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \ 20 (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \ 21 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \ 22 (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \ 23 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \ 24 (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \ 25 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \ 26 (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \ 27 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M)) 28 29 /** 30 * ice_set_mac_type - Sets MAC type 31 * @hw: pointer to the HW structure 32 * 33 * This function sets the MAC type of the adapter based on the 34 * vendor ID and device ID stored in the hw structure. 35 */ 36 static enum ice_status ice_set_mac_type(struct ice_hw *hw) 37 { 38 if (hw->vendor_id != PCI_VENDOR_ID_INTEL) 39 return ICE_ERR_DEVICE_NOT_SUPPORTED; 40 41 hw->mac_type = ICE_MAC_GENERIC; 42 return 0; 43 } 44 45 /** 46 * ice_dev_onetime_setup - Temporary HW/FW workarounds 47 * @hw: pointer to the HW structure 48 * 49 * This function provides temporary workarounds for certain issues 50 * that are expected to be fixed in the HW/FW. 51 */ 52 void ice_dev_onetime_setup(struct ice_hw *hw) 53 { 54 /* configure Rx - set non pxe mode */ 55 wr32(hw, GLLAN_RCTL_0, 0x1); 56 57 #define MBX_PF_VT_PFALLOC 0x00231E80 58 /* set VFs per PF */ 59 wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF)); 60 } 61 62 /** 63 * ice_clear_pf_cfg - Clear PF configuration 64 * @hw: pointer to the hardware structure 65 * 66 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port 67 * configuration, flow director filters, etc.). 68 */ 69 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) 70 { 71 struct ice_aq_desc desc; 72 73 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg); 74 75 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 76 } 77 78 /** 79 * ice_aq_manage_mac_read - manage MAC address read command 80 * @hw: pointer to the hw struct 81 * @buf: a virtual buffer to hold the manage MAC read response 82 * @buf_size: Size of the virtual buffer 83 * @cd: pointer to command details structure or NULL 84 * 85 * This function is used to return per PF station MAC address (0x0107). 86 * NOTE: Upon successful completion of this command, MAC address information 87 * is returned in user specified buffer. Please interpret user specified 88 * buffer as "manage_mac_read" response. 89 * Response such as various MAC addresses are stored in HW struct (port.mac) 90 * ice_aq_discover_caps is expected to be called before this function is called. 
91 */ 92 static enum ice_status 93 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, 94 struct ice_sq_cd *cd) 95 { 96 struct ice_aqc_manage_mac_read_resp *resp; 97 struct ice_aqc_manage_mac_read *cmd; 98 struct ice_aq_desc desc; 99 enum ice_status status; 100 u16 flags; 101 u8 i; 102 103 cmd = &desc.params.mac_read; 104 105 if (buf_size < sizeof(*resp)) 106 return ICE_ERR_BUF_TOO_SHORT; 107 108 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); 109 110 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 111 if (status) 112 return status; 113 114 resp = (struct ice_aqc_manage_mac_read_resp *)buf; 115 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M; 116 117 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { 118 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); 119 return ICE_ERR_CFG; 120 } 121 122 /* A single port can report up to two (LAN and WoL) addresses */ 123 for (i = 0; i < cmd->num_addr; i++) 124 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) { 125 ether_addr_copy(hw->port_info->mac.lan_addr, 126 resp[i].mac_addr); 127 ether_addr_copy(hw->port_info->mac.perm_addr, 128 resp[i].mac_addr); 129 break; 130 } 131 132 return 0; 133 } 134 135 /** 136 * ice_aq_get_phy_caps - returns PHY capabilities 137 * @pi: port information structure 138 * @qual_mods: report qualified modules 139 * @report_mode: report mode capabilities 140 * @pcaps: structure for PHY capabilities to be filled 141 * @cd: pointer to command details structure or NULL 142 * 143 * Returns the various PHY capabilities supported on the Port (0x0600) 144 */ 145 enum ice_status 146 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, 147 struct ice_aqc_get_phy_caps_data *pcaps, 148 struct ice_sq_cd *cd) 149 { 150 struct ice_aqc_get_phy_caps *cmd; 151 u16 pcaps_size = sizeof(*pcaps); 152 struct ice_aq_desc desc; 153 enum ice_status status; 154 155 cmd = &desc.params.get_phy; 156 157 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) 158 return ICE_ERR_PARAM; 159 160 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); 161 162 if (qual_mods) 163 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM); 164 165 cmd->param0 |= cpu_to_le16(report_mode); 166 status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd); 167 168 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) 169 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); 170 171 return status; 172 } 173 174 /** 175 * ice_get_media_type - Gets media type 176 * @pi: port information structure 177 */ 178 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) 179 { 180 struct ice_link_status *hw_link_info; 181 182 if (!pi) 183 return ICE_MEDIA_UNKNOWN; 184 185 hw_link_info = &pi->phy.link_info; 186 187 if (hw_link_info->phy_type_low) { 188 switch (hw_link_info->phy_type_low) { 189 case ICE_PHY_TYPE_LOW_1000BASE_SX: 190 case ICE_PHY_TYPE_LOW_1000BASE_LX: 191 case ICE_PHY_TYPE_LOW_10GBASE_SR: 192 case ICE_PHY_TYPE_LOW_10GBASE_LR: 193 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 194 case ICE_PHY_TYPE_LOW_25GBASE_SR: 195 case ICE_PHY_TYPE_LOW_25GBASE_LR: 196 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 197 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 198 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 199 return ICE_MEDIA_FIBER; 200 case ICE_PHY_TYPE_LOW_100BASE_TX: 201 case ICE_PHY_TYPE_LOW_1000BASE_T: 202 case ICE_PHY_TYPE_LOW_2500BASE_T: 203 case ICE_PHY_TYPE_LOW_5GBASE_T: 204 case ICE_PHY_TYPE_LOW_10GBASE_T: 205 case ICE_PHY_TYPE_LOW_25GBASE_T: 206 return ICE_MEDIA_BASET; 207 case 
ICE_PHY_TYPE_LOW_10G_SFI_DA: 208 case ICE_PHY_TYPE_LOW_25GBASE_CR: 209 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 210 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 211 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 212 return ICE_MEDIA_DA; 213 case ICE_PHY_TYPE_LOW_1000BASE_KX: 214 case ICE_PHY_TYPE_LOW_2500BASE_KX: 215 case ICE_PHY_TYPE_LOW_2500BASE_X: 216 case ICE_PHY_TYPE_LOW_5GBASE_KR: 217 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 218 case ICE_PHY_TYPE_LOW_25GBASE_KR: 219 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 220 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 221 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 222 return ICE_MEDIA_BACKPLANE; 223 } 224 } 225 226 return ICE_MEDIA_UNKNOWN; 227 } 228 229 /** 230 * ice_aq_get_link_info 231 * @pi: port information structure 232 * @ena_lse: enable/disable LinkStatusEvent reporting 233 * @link: pointer to link status structure - optional 234 * @cd: pointer to command details structure or NULL 235 * 236 * Get Link Status (0x607). Returns the link status of the adapter. 237 */ 238 static enum ice_status 239 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, 240 struct ice_link_status *link, struct ice_sq_cd *cd) 241 { 242 struct ice_link_status *hw_link_info_old, *hw_link_info; 243 struct ice_aqc_get_link_status_data link_data = { 0 }; 244 struct ice_aqc_get_link_status *resp; 245 enum ice_media_type *hw_media_type; 246 struct ice_fc_info *hw_fc_info; 247 bool tx_pause, rx_pause; 248 struct ice_aq_desc desc; 249 enum ice_status status; 250 u16 cmd_flags; 251 252 if (!pi) 253 return ICE_ERR_PARAM; 254 hw_link_info_old = &pi->phy.link_info_old; 255 hw_media_type = &pi->phy.media_type; 256 hw_link_info = &pi->phy.link_info; 257 hw_fc_info = &pi->fc; 258 259 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); 260 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS; 261 resp = &desc.params.get_link_status; 262 resp->cmd_flags = cpu_to_le16(cmd_flags); 263 resp->lport_num = pi->lport; 264 265 status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data), 266 cd); 267 268 if (status) 269 return status; 270 271 /* save off old link status information */ 272 *hw_link_info_old = *hw_link_info; 273 274 /* update current link status information */ 275 hw_link_info->link_speed = le16_to_cpu(link_data.link_speed); 276 hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low); 277 *hw_media_type = ice_get_media_type(pi); 278 hw_link_info->link_info = link_data.link_info; 279 hw_link_info->an_info = link_data.an_info; 280 hw_link_info->ext_info = link_data.ext_info; 281 hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size); 282 hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M; 283 284 /* update fc info */ 285 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX); 286 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX); 287 if (tx_pause && rx_pause) 288 hw_fc_info->current_mode = ICE_FC_FULL; 289 else if (tx_pause) 290 hw_fc_info->current_mode = ICE_FC_TX_PAUSE; 291 else if (rx_pause) 292 hw_fc_info->current_mode = ICE_FC_RX_PAUSE; 293 else 294 hw_fc_info->current_mode = ICE_FC_NONE; 295 296 hw_link_info->lse_ena = 297 !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); 298 299 /* save link status information */ 300 if (link) 301 *link = *hw_link_info; 302 303 /* flag cleared so calling functions don't call AQ again */ 304 pi->phy.get_link_info = false; 305 306 return status; 307 } 308 309 /** 310 * ice_init_flex_flags 311 * @hw: pointer to the hardware structure 312 * @prof_id: Rx Descriptor Builder profile ID 313 * 314 * Function to 
initialize Rx flex flags 315 */ 316 static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id) 317 { 318 u8 idx = 0; 319 320 /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout: 321 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE 322 * flexiflags1[3:0] - Not used for flag programming 323 * flexiflags2[7:0] - Tunnel and VLAN types 324 * 2 invalid fields in last index 325 */ 326 switch (prof_id) { 327 /* Rx flex flags are currently programmed for the NIC profiles only. 328 * Different flag bit programming configurations can be added per 329 * profile as needed. 330 */ 331 case ICE_RXDID_FLEX_NIC: 332 case ICE_RXDID_FLEX_NIC_2: 333 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG, 334 ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI, 335 ICE_RXFLG_FIN, idx++); 336 /* flex flag 1 is not used for flexi-flag programming, skipping 337 * these four FLG64 bits. 338 */ 339 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST, 340 ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++); 341 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI, 342 ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100, 343 ICE_RXFLG_EVLAN_x9100, idx++); 344 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100, 345 ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC, 346 ICE_RXFLG_TNL0, idx++); 347 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2, 348 ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx); 349 break; 350 351 default: 352 ice_debug(hw, ICE_DBG_INIT, 353 "Flag programming for profile ID %d not supported\n", 354 prof_id); 355 } 356 } 357 358 /** 359 * ice_init_flex_flds 360 * @hw: pointer to the hardware structure 361 * @prof_id: Rx Descriptor Builder profile ID 362 * 363 * Function to initialize flex descriptors 364 */ 365 static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id) 366 { 367 enum ice_flex_rx_mdid mdid; 368 369 switch (prof_id) { 370 case ICE_RXDID_FLEX_NIC: 371 case ICE_RXDID_FLEX_NIC_2: 372 ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0); 373 ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1); 374 ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2); 375 376 mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ? 
377 ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH; 378 379 ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3); 380 381 ice_init_flex_flags(hw, prof_id); 382 break; 383 384 default: 385 ice_debug(hw, ICE_DBG_INIT, 386 "Field init for profile ID %d not supported\n", 387 prof_id); 388 } 389 } 390 391 /** 392 * ice_init_fltr_mgmt_struct - initializes filter management list and locks 393 * @hw: pointer to the hw struct 394 */ 395 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) 396 { 397 struct ice_switch_info *sw; 398 399 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw), 400 sizeof(*hw->switch_info), GFP_KERNEL); 401 sw = hw->switch_info; 402 403 if (!sw) 404 return ICE_ERR_NO_MEMORY; 405 406 INIT_LIST_HEAD(&sw->vsi_list_map_head); 407 408 ice_init_def_sw_recp(hw); 409 410 return 0; 411 } 412 413 /** 414 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks 415 * @hw: pointer to the hw struct 416 */ 417 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) 418 { 419 struct ice_switch_info *sw = hw->switch_info; 420 struct ice_vsi_list_map_info *v_pos_map; 421 struct ice_vsi_list_map_info *v_tmp_map; 422 struct ice_sw_recipe *recps; 423 u8 i; 424 425 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head, 426 list_entry) { 427 list_del(&v_pos_map->list_entry); 428 devm_kfree(ice_hw_to_dev(hw), v_pos_map); 429 } 430 recps = hw->switch_info->recp_list; 431 for (i = 0; i < ICE_SW_LKUP_LAST; i++) { 432 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry; 433 434 recps[i].root_rid = i; 435 mutex_destroy(&recps[i].filt_rule_lock); 436 list_for_each_entry_safe(lst_itr, tmp_entry, 437 &recps[i].filt_rules, list_entry) { 438 list_del(&lst_itr->list_entry); 439 devm_kfree(ice_hw_to_dev(hw), lst_itr); 440 } 441 } 442 ice_rm_all_sw_replay_rule_info(hw); 443 devm_kfree(ice_hw_to_dev(hw), sw->recp_list); 444 devm_kfree(ice_hw_to_dev(hw), sw); 445 } 446 447 #define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \ 448 (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry))) 449 #define ICE_FW_LOG_DESC_SIZE_MAX \ 450 ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX) 451 452 /** 453 * ice_cfg_fw_log - configure FW logging 454 * @hw: pointer to the hw struct 455 * @enable: enable certain FW logging events if true, disable all if false 456 * 457 * This function enables/disables the FW logging via Rx CQ events and a UART 458 * port based on predetermined configurations. FW logging via the Rx CQ can be 459 * enabled/disabled for individual PF's. However, FW logging via the UART can 460 * only be enabled/disabled for all PFs on the same device. 461 * 462 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in 463 * hw->fw_log need to be set accordingly, e.g. based on user-provided input, 464 * before initializing the device. 465 * 466 * When re/configuring FW logging, callers need to update the "cfg" elements of 467 * the hw->fw_log.evnts array with the desired logging event configurations for 468 * modules of interest. When disabling FW logging completely, the callers can 469 * just pass false in the "enable" parameter. On completion, the function will 470 * update the "cur" element of the hw->fw_log.evnts array with the resulting 471 * logging event configurations of the modules that are being re/configured. FW 472 * logging modules that are not part of a reconfiguration operation retain their 473 * previous states. 
474 * 475 * Before resetting the device, it is recommended that the driver disables FW 476 * logging before shutting down the control queue. When disabling FW logging 477 * ("enable" = false), the latest configurations of FW logging events stored in 478 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after 479 * a device reset. 480 * 481 * When enabling FW logging to emit log messages via the Rx CQ during the 482 * device's initialization phase, a mechanism alternative to interrupt handlers 483 * needs to be used to extract FW log messages from the Rx CQ periodically and 484 * to prevent the Rx CQ from being full and stalling other types of control 485 * messages from FW to SW. Interrupts are typically disabled during the device's 486 * initialization phase. 487 */ 488 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) 489 { 490 struct ice_aqc_fw_logging_data *data = NULL; 491 struct ice_aqc_fw_logging *cmd; 492 enum ice_status status = 0; 493 u16 i, chgs = 0, len = 0; 494 struct ice_aq_desc desc; 495 u8 actv_evnts = 0; 496 void *buf = NULL; 497 498 if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) 499 return 0; 500 501 /* Disable FW logging only when the control queue is still responsive */ 502 if (!enable && 503 (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq))) 504 return 0; 505 506 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging); 507 cmd = &desc.params.fw_logging; 508 509 /* Indicate which controls are valid */ 510 if (hw->fw_log.cq_en) 511 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID; 512 513 if (hw->fw_log.uart_en) 514 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID; 515 516 if (enable) { 517 /* Fill in an array of entries with FW logging modules and 518 * logging events being reconfigured. 519 */ 520 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { 521 u16 val; 522 523 /* Keep track of enabled event types */ 524 actv_evnts |= hw->fw_log.evnts[i].cfg; 525 526 if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur) 527 continue; 528 529 if (!data) { 530 data = devm_kzalloc(ice_hw_to_dev(hw), 531 ICE_FW_LOG_DESC_SIZE_MAX, 532 GFP_KERNEL); 533 if (!data) 534 return ICE_ERR_NO_MEMORY; 535 } 536 537 val = i << ICE_AQC_FW_LOG_ID_S; 538 val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S; 539 data->entry[chgs++] = cpu_to_le16(val); 540 } 541 542 /* Only enable FW logging if at least one module is specified. 543 * If FW logging is currently enabled but all modules are not 544 * enabled to emit log messages, disable FW logging altogether. 545 */ 546 if (actv_evnts) { 547 /* Leave if there is effectively no change */ 548 if (!chgs) 549 goto out; 550 551 if (hw->fw_log.cq_en) 552 cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN; 553 554 if (hw->fw_log.uart_en) 555 cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN; 556 557 buf = data; 558 len = ICE_FW_LOG_DESC_SIZE(chgs); 559 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 560 } 561 } 562 563 status = ice_aq_send_cmd(hw, &desc, buf, len, NULL); 564 if (!status) { 565 /* Update the current configuration to reflect events enabled. 566 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW 567 * logging mode is enabled for the device. They do not reflect 568 * actual modules being enabled to emit log messages. So, their 569 * values remain unchanged even when all modules are disabled. 570 */ 571 u16 cnt = enable ? 
chgs : (u16)ICE_AQC_FW_LOG_ID_MAX; 572 573 hw->fw_log.actv_evnts = actv_evnts; 574 for (i = 0; i < cnt; i++) { 575 u16 v, m; 576 577 if (!enable) { 578 /* When disabling all FW logging events as part 579 * of device's de-initialization, the original 580 * configurations are retained, and can be used 581 * to reconfigure FW logging later if the device 582 * is re-initialized. 583 */ 584 hw->fw_log.evnts[i].cur = 0; 585 continue; 586 } 587 588 v = le16_to_cpu(data->entry[i]); 589 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; 590 hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg; 591 } 592 } 593 594 out: 595 if (data) 596 devm_kfree(ice_hw_to_dev(hw), data); 597 598 return status; 599 } 600 601 /** 602 * ice_output_fw_log 603 * @hw: pointer to the hw struct 604 * @desc: pointer to the AQ message descriptor 605 * @buf: pointer to the buffer accompanying the AQ message 606 * 607 * Formats a FW Log message and outputs it via the standard driver logs. 608 */ 609 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) 610 { 611 ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n"); 612 ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf, 613 le16_to_cpu(desc->datalen)); 614 ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n"); 615 } 616 617 /** 618 * ice_get_itr_intrl_gran - determine int/intrl granularity 619 * @hw: pointer to the hw struct 620 * 621 * Determines the itr/intrl granularities based on the maximum aggregate 622 * bandwidth according to the device's configuration during power-on. 623 */ 624 static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw) 625 { 626 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) & 627 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >> 628 GL_PWR_MODE_CTL_CAR_MAX_BW_S; 629 630 switch (max_agg_bw) { 631 case ICE_MAX_AGG_BW_200G: 632 case ICE_MAX_AGG_BW_100G: 633 case ICE_MAX_AGG_BW_50G: 634 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25; 635 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25; 636 break; 637 case ICE_MAX_AGG_BW_25G: 638 hw->itr_gran = ICE_ITR_GRAN_MAX_25; 639 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25; 640 break; 641 default: 642 ice_debug(hw, ICE_DBG_INIT, 643 "Failed to determine itr/intrl granularity\n"); 644 return ICE_ERR_CFG; 645 } 646 647 return 0; 648 } 649 650 /** 651 * ice_init_hw - main hardware initialization routine 652 * @hw: pointer to the hardware structure 653 */ 654 enum ice_status ice_init_hw(struct ice_hw *hw) 655 { 656 struct ice_aqc_get_phy_caps_data *pcaps; 657 enum ice_status status; 658 u16 mac_buf_len; 659 void *mac_buf; 660 661 /* Set MAC type based on DeviceID */ 662 status = ice_set_mac_type(hw); 663 if (status) 664 return status; 665 666 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) & 667 PF_FUNC_RID_FUNC_NUM_M) >> 668 PF_FUNC_RID_FUNC_NUM_S; 669 670 status = ice_reset(hw, ICE_RESET_PFR); 671 if (status) 672 return status; 673 674 status = ice_get_itr_intrl_gran(hw); 675 if (status) 676 return status; 677 678 status = ice_init_all_ctrlq(hw); 679 if (status) 680 goto err_unroll_cqinit; 681 682 /* Enable FW logging. Not fatal if this fails. 
*/ 683 status = ice_cfg_fw_log(hw, true); 684 if (status) 685 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n"); 686 687 status = ice_clear_pf_cfg(hw); 688 if (status) 689 goto err_unroll_cqinit; 690 691 ice_clear_pxe_mode(hw); 692 693 status = ice_init_nvm(hw); 694 if (status) 695 goto err_unroll_cqinit; 696 697 status = ice_get_caps(hw); 698 if (status) 699 goto err_unroll_cqinit; 700 701 hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), 702 sizeof(*hw->port_info), GFP_KERNEL); 703 if (!hw->port_info) { 704 status = ICE_ERR_NO_MEMORY; 705 goto err_unroll_cqinit; 706 } 707 708 /* set the back pointer to hw */ 709 hw->port_info->hw = hw; 710 711 /* Initialize port_info struct with switch configuration data */ 712 status = ice_get_initial_sw_cfg(hw); 713 if (status) 714 goto err_unroll_alloc; 715 716 hw->evb_veb = true; 717 718 /* Query the allocated resources for tx scheduler */ 719 status = ice_sched_query_res_alloc(hw); 720 if (status) { 721 ice_debug(hw, ICE_DBG_SCHED, 722 "Failed to get scheduler allocated resources\n"); 723 goto err_unroll_alloc; 724 } 725 726 /* Initialize port_info struct with scheduler data */ 727 status = ice_sched_init_port(hw->port_info); 728 if (status) 729 goto err_unroll_sched; 730 731 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 732 if (!pcaps) { 733 status = ICE_ERR_NO_MEMORY; 734 goto err_unroll_sched; 735 } 736 737 /* Initialize port_info struct with PHY capabilities */ 738 status = ice_aq_get_phy_caps(hw->port_info, false, 739 ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); 740 devm_kfree(ice_hw_to_dev(hw), pcaps); 741 if (status) 742 goto err_unroll_sched; 743 744 /* Initialize port_info struct with link information */ 745 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL); 746 if (status) 747 goto err_unroll_sched; 748 749 /* need a valid SW entry point to build a Tx tree */ 750 if (!hw->sw_entry_point_layer) { 751 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); 752 status = ICE_ERR_CFG; 753 goto err_unroll_sched; 754 } 755 756 status = ice_init_fltr_mgmt_struct(hw); 757 if (status) 758 goto err_unroll_sched; 759 760 ice_dev_onetime_setup(hw); 761 762 /* Get MAC information */ 763 /* A single port can report up to two (LAN and WoL) addresses */ 764 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2, 765 sizeof(struct ice_aqc_manage_mac_read_resp), 766 GFP_KERNEL); 767 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); 768 769 if (!mac_buf) { 770 status = ICE_ERR_NO_MEMORY; 771 goto err_unroll_fltr_mgmt_struct; 772 } 773 774 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL); 775 devm_kfree(ice_hw_to_dev(hw), mac_buf); 776 777 if (status) 778 goto err_unroll_fltr_mgmt_struct; 779 780 ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC); 781 ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2); 782 783 return 0; 784 785 err_unroll_fltr_mgmt_struct: 786 ice_cleanup_fltr_mgmt_struct(hw); 787 err_unroll_sched: 788 ice_sched_cleanup_all(hw); 789 err_unroll_alloc: 790 devm_kfree(ice_hw_to_dev(hw), hw->port_info); 791 err_unroll_cqinit: 792 ice_shutdown_all_ctrlq(hw); 793 return status; 794 } 795 796 /** 797 * ice_deinit_hw - unroll initialization operations done by ice_init_hw 798 * @hw: pointer to the hardware structure 799 */ 800 void ice_deinit_hw(struct ice_hw *hw) 801 { 802 ice_cleanup_fltr_mgmt_struct(hw); 803 804 ice_sched_cleanup_all(hw); 805 806 if (hw->port_info) { 807 devm_kfree(ice_hw_to_dev(hw), hw->port_info); 808 hw->port_info = NULL; 809 } 810 811 /* Attempt to disable FW logging before shutting 
down control queues */ 812 ice_cfg_fw_log(hw, false); 813 ice_shutdown_all_ctrlq(hw); 814 815 /* Clear VSI contexts if not already cleared */ 816 ice_clear_all_vsi_ctx(hw); 817 } 818 819 /** 820 * ice_check_reset - Check to see if a global reset is complete 821 * @hw: pointer to the hardware structure 822 */ 823 enum ice_status ice_check_reset(struct ice_hw *hw) 824 { 825 u32 cnt, reg = 0, grst_delay; 826 827 /* Poll for Device Active state in case a recent CORER, GLOBR, 828 * or EMPR has occurred. The grst delay value is in 100ms units. 829 * Add 1sec for outstanding AQ commands that can take a long time. 830 */ 831 grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> 832 GLGEN_RSTCTL_GRSTDEL_S) + 10; 833 834 for (cnt = 0; cnt < grst_delay; cnt++) { 835 mdelay(100); 836 reg = rd32(hw, GLGEN_RSTAT); 837 if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) 838 break; 839 } 840 841 if (cnt == grst_delay) { 842 ice_debug(hw, ICE_DBG_INIT, 843 "Global reset polling failed to complete.\n"); 844 return ICE_ERR_RESET_FAILED; 845 } 846 847 #define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \ 848 GLNVM_ULD_GLOBR_DONE_M) 849 850 /* Device is Active; check Global Reset processes are done */ 851 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { 852 reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK; 853 if (reg == ICE_RESET_DONE_MASK) { 854 ice_debug(hw, ICE_DBG_INIT, 855 "Global reset processes done. %d\n", cnt); 856 break; 857 } 858 mdelay(10); 859 } 860 861 if (cnt == ICE_PF_RESET_WAIT_COUNT) { 862 ice_debug(hw, ICE_DBG_INIT, 863 "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", 864 reg); 865 return ICE_ERR_RESET_FAILED; 866 } 867 868 return 0; 869 } 870 871 /** 872 * ice_pf_reset - Reset the PF 873 * @hw: pointer to the hardware structure 874 * 875 * If a global reset has been triggered, this function checks 876 * for its completion and then issues the PF reset 877 */ 878 static enum ice_status ice_pf_reset(struct ice_hw *hw) 879 { 880 u32 cnt, reg; 881 882 /* If at function entry a global reset was already in progress, i.e. 883 * state is not 'device active' or any of the reset done bits are not 884 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the 885 * global reset is done. 886 */ 887 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) || 888 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { 889 /* poll on global reset currently in progress until done */ 890 if (ice_check_reset(hw)) 891 return ICE_ERR_RESET_FAILED; 892 893 return 0; 894 } 895 896 /* Reset the PF */ 897 reg = rd32(hw, PFGEN_CTRL); 898 899 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M)); 900 901 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { 902 reg = rd32(hw, PFGEN_CTRL); 903 if (!(reg & PFGEN_CTRL_PFSWR_M)) 904 break; 905 906 mdelay(1); 907 } 908 909 if (cnt == ICE_PF_RESET_WAIT_COUNT) { 910 ice_debug(hw, ICE_DBG_INIT, 911 "PF reset polling failed to complete.\n"); 912 return ICE_ERR_RESET_FAILED; 913 } 914 915 return 0; 916 } 917 918 /** 919 * ice_reset - Perform different types of reset 920 * @hw: pointer to the hardware structure 921 * @req: reset request 922 * 923 * This function triggers a reset as specified by the req parameter. 924 * 925 * Note: 926 * If anything other than a PF reset is triggered, PXE mode is restored. 927 * This has to be cleared using ice_clear_pxe_mode again, once the AQ 928 * interface has been restored in the rebuild flow. 
929 */ 930 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) 931 { 932 u32 val = 0; 933 934 switch (req) { 935 case ICE_RESET_PFR: 936 return ice_pf_reset(hw); 937 case ICE_RESET_CORER: 938 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n"); 939 val = GLGEN_RTRIG_CORER_M; 940 break; 941 case ICE_RESET_GLOBR: 942 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); 943 val = GLGEN_RTRIG_GLOBR_M; 944 break; 945 default: 946 return ICE_ERR_PARAM; 947 } 948 949 val |= rd32(hw, GLGEN_RTRIG); 950 wr32(hw, GLGEN_RTRIG, val); 951 ice_flush(hw); 952 953 /* wait for the FW to be ready */ 954 return ice_check_reset(hw); 955 } 956 957 /** 958 * ice_copy_rxq_ctx_to_hw 959 * @hw: pointer to the hardware structure 960 * @ice_rxq_ctx: pointer to the rxq context 961 * @rxq_index: the index of the rx queue 962 * 963 * Copies rxq context from dense structure to hw register space 964 */ 965 static enum ice_status 966 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) 967 { 968 u8 i; 969 970 if (!ice_rxq_ctx) 971 return ICE_ERR_BAD_PTR; 972 973 if (rxq_index > QRX_CTRL_MAX_INDEX) 974 return ICE_ERR_PARAM; 975 976 /* Copy each dword separately to hw */ 977 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { 978 wr32(hw, QRX_CONTEXT(i, rxq_index), 979 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); 980 981 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, 982 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); 983 } 984 985 return 0; 986 } 987 988 /* LAN Rx Queue Context */ 989 static const struct ice_ctx_ele ice_rlan_ctx_info[] = { 990 /* Field Width LSB */ 991 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), 992 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13), 993 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32), 994 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89), 995 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102), 996 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109), 997 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114), 998 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116), 999 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117), 1000 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119), 1001 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120), 1002 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124), 1003 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127), 1004 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174), 1005 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193), 1006 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194), 1007 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195), 1008 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), 1009 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), 1010 { 0 } 1011 }; 1012 1013 /** 1014 * ice_write_rxq_ctx 1015 * @hw: pointer to the hardware structure 1016 * @rlan_ctx: pointer to the rxq context 1017 * @rxq_index: the index of the rx queue 1018 * 1019 * Converts rxq context from sparse to dense structure and then writes 1020 * it to hw register space 1021 */ 1022 enum ice_status 1023 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, 1024 u32 rxq_index) 1025 { 1026 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; 1027 1028 ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); 1029 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); 1030 } 1031 1032 /* LAN Tx Queue Context */ 1033 const struct ice_ctx_ele ice_tlan_ctx_info[] = { 1034 /* Field Width LSB */ 1035 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0), 1036 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57), 1037 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60), 1038 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65), 1039 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68), 1040 
ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78), 1041 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80), 1042 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90), 1043 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92), 1044 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93), 1045 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101), 1046 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102), 1047 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103), 1048 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104), 1049 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105), 1050 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114), 1051 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128), 1052 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129), 1053 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135), 1054 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148), 1055 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152), 1056 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153), 1057 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164), 1058 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), 1059 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), 1060 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), 1061 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 110, 171), 1062 { 0 } 1063 }; 1064 1065 /** 1066 * ice_debug_cq 1067 * @hw: pointer to the hardware structure 1068 * @mask: debug mask 1069 * @desc: pointer to control queue descriptor 1070 * @buf: pointer to command buffer 1071 * @buf_len: max length of buf 1072 * 1073 * Dumps debug log about control command with descriptor contents. 1074 */ 1075 void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, 1076 void *buf, u16 buf_len) 1077 { 1078 struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc; 1079 u16 len; 1080 1081 #ifndef CONFIG_DYNAMIC_DEBUG 1082 if (!(mask & hw->debug_mask)) 1083 return; 1084 #endif 1085 1086 if (!desc) 1087 return; 1088 1089 len = le16_to_cpu(cq_desc->datalen); 1090 1091 ice_debug(hw, mask, 1092 "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", 1093 le16_to_cpu(cq_desc->opcode), 1094 le16_to_cpu(cq_desc->flags), 1095 le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval)); 1096 ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", 1097 le32_to_cpu(cq_desc->cookie_high), 1098 le32_to_cpu(cq_desc->cookie_low)); 1099 ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", 1100 le32_to_cpu(cq_desc->params.generic.param0), 1101 le32_to_cpu(cq_desc->params.generic.param1)); 1102 ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", 1103 le32_to_cpu(cq_desc->params.generic.addr_high), 1104 le32_to_cpu(cq_desc->params.generic.addr_low)); 1105 if (buf && cq_desc->datalen != 0) { 1106 ice_debug(hw, mask, "Buffer:\n"); 1107 if (buf_len < len) 1108 len = buf_len; 1109 1110 ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len); 1111 } 1112 } 1113 1114 /* FW Admin Queue command wrappers */ 1115 1116 /** 1117 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue 1118 * @hw: pointer to the hw struct 1119 * @desc: descriptor describing the command 1120 * @buf: buffer to use for indirect commands (NULL for direct commands) 1121 * @buf_size: size of buffer for indirect commands (0 for direct commands) 1122 * @cd: pointer to command details structure 1123 * 1124 * Helper function to send FW Admin Queue commands to the FW Admin Queue. 
1125 */ 1126 enum ice_status 1127 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, 1128 u16 buf_size, struct ice_sq_cd *cd) 1129 { 1130 return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); 1131 } 1132 1133 /** 1134 * ice_aq_get_fw_ver 1135 * @hw: pointer to the hw struct 1136 * @cd: pointer to command details structure or NULL 1137 * 1138 * Get the firmware version (0x0001) from the admin queue commands 1139 */ 1140 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) 1141 { 1142 struct ice_aqc_get_ver *resp; 1143 struct ice_aq_desc desc; 1144 enum ice_status status; 1145 1146 resp = &desc.params.get_ver; 1147 1148 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver); 1149 1150 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1151 1152 if (!status) { 1153 hw->fw_branch = resp->fw_branch; 1154 hw->fw_maj_ver = resp->fw_major; 1155 hw->fw_min_ver = resp->fw_minor; 1156 hw->fw_patch = resp->fw_patch; 1157 hw->fw_build = le32_to_cpu(resp->fw_build); 1158 hw->api_branch = resp->api_branch; 1159 hw->api_maj_ver = resp->api_major; 1160 hw->api_min_ver = resp->api_minor; 1161 hw->api_patch = resp->api_patch; 1162 } 1163 1164 return status; 1165 } 1166 1167 /** 1168 * ice_aq_q_shutdown 1169 * @hw: pointer to the hw struct 1170 * @unloading: is the driver unloading itself 1171 * 1172 * Tell the Firmware that we're shutting down the AdminQ and whether 1173 * or not the driver is unloading as well (0x0003). 1174 */ 1175 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) 1176 { 1177 struct ice_aqc_q_shutdown *cmd; 1178 struct ice_aq_desc desc; 1179 1180 cmd = &desc.params.q_shutdown; 1181 1182 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); 1183 1184 if (unloading) 1185 cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING); 1186 1187 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1188 } 1189 1190 /** 1191 * ice_aq_req_res 1192 * @hw: pointer to the hw struct 1193 * @res: resource id 1194 * @access: access type 1195 * @sdp_number: resource number 1196 * @timeout: the maximum time in ms that the driver may hold the resource 1197 * @cd: pointer to command details structure or NULL 1198 * 1199 * Requests common resource using the admin queue commands (0x0008). 1200 * When attempting to acquire the Global Config Lock, the driver can 1201 * learn of three states: 1202 * 1) ICE_SUCCESS - acquired lock, and can perform download package 1203 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load 1204 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has 1205 * successfully downloaded the package; the driver does 1206 * not have to download the package and can continue 1207 * loading 1208 * 1209 * Note that if the caller is in an acquire lock, perform action, release lock 1210 * phase of operation, it is possible that the FW may detect a timeout and issue 1211 * a CORER. In this case, the driver will receive a CORER interrupt and will 1212 * have to determine its cause. The calling thread that is handling this flow 1213 * will likely get an error propagated back to it indicating the Download 1214 * Package, Update Package or the Release Resource AQ commands timed out. 
1215 */ 1216 static enum ice_status 1217 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1218 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, 1219 struct ice_sq_cd *cd) 1220 { 1221 struct ice_aqc_req_res *cmd_resp; 1222 struct ice_aq_desc desc; 1223 enum ice_status status; 1224 1225 cmd_resp = &desc.params.res_owner; 1226 1227 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); 1228 1229 cmd_resp->res_id = cpu_to_le16(res); 1230 cmd_resp->access_type = cpu_to_le16(access); 1231 cmd_resp->res_number = cpu_to_le32(sdp_number); 1232 cmd_resp->timeout = cpu_to_le32(*timeout); 1233 *timeout = 0; 1234 1235 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1236 1237 /* The completion specifies the maximum time in ms that the driver 1238 * may hold the resource in the Timeout field. 1239 */ 1240 1241 /* Global config lock response utilizes an additional status field. 1242 * 1243 * If the Global config lock resource is held by some other driver, the 1244 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field 1245 * and the timeout field indicates the maximum time the current owner 1246 * of the resource has to free it. 1247 */ 1248 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { 1249 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { 1250 *timeout = le32_to_cpu(cmd_resp->timeout); 1251 return 0; 1252 } else if (le16_to_cpu(cmd_resp->status) == 1253 ICE_AQ_RES_GLBL_IN_PROG) { 1254 *timeout = le32_to_cpu(cmd_resp->timeout); 1255 return ICE_ERR_AQ_ERROR; 1256 } else if (le16_to_cpu(cmd_resp->status) == 1257 ICE_AQ_RES_GLBL_DONE) { 1258 return ICE_ERR_AQ_NO_WORK; 1259 } 1260 1261 /* invalid FW response, force a timeout immediately */ 1262 *timeout = 0; 1263 return ICE_ERR_AQ_ERROR; 1264 } 1265 1266 /* If the resource is held by some other driver, the command completes 1267 * with a busy return value and the timeout field indicates the maximum 1268 * time the current owner of the resource has to free it. 1269 */ 1270 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) 1271 *timeout = le32_to_cpu(cmd_resp->timeout); 1272 1273 return status; 1274 } 1275 1276 /** 1277 * ice_aq_release_res 1278 * @hw: pointer to the hw struct 1279 * @res: resource id 1280 * @sdp_number: resource number 1281 * @cd: pointer to command details structure or NULL 1282 * 1283 * release common resource using the admin queue commands (0x0009) 1284 */ 1285 static enum ice_status 1286 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, 1287 struct ice_sq_cd *cd) 1288 { 1289 struct ice_aqc_req_res *cmd; 1290 struct ice_aq_desc desc; 1291 1292 cmd = &desc.params.res_owner; 1293 1294 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); 1295 1296 cmd->res_id = cpu_to_le16(res); 1297 cmd->res_number = cpu_to_le32(sdp_number); 1298 1299 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1300 } 1301 1302 /** 1303 * ice_acquire_res 1304 * @hw: pointer to the HW structure 1305 * @res: resource id 1306 * @access: access type (read or write) 1307 * @timeout: timeout in milliseconds 1308 * 1309 * This function will attempt to acquire the ownership of a resource. 
1310 */ 1311 enum ice_status 1312 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1313 enum ice_aq_res_access_type access, u32 timeout) 1314 { 1315 #define ICE_RES_POLLING_DELAY_MS 10 1316 u32 delay = ICE_RES_POLLING_DELAY_MS; 1317 u32 time_left = timeout; 1318 enum ice_status status; 1319 1320 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1321 1322 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has 1323 * previously acquired the resource and performed any necessary updates; 1324 * in this case the caller does not obtain the resource and has no 1325 * further work to do. 1326 */ 1327 if (status == ICE_ERR_AQ_NO_WORK) 1328 goto ice_acquire_res_exit; 1329 1330 if (status) 1331 ice_debug(hw, ICE_DBG_RES, 1332 "resource %d acquire type %d failed.\n", res, access); 1333 1334 /* If necessary, poll until the current lock owner timeouts */ 1335 timeout = time_left; 1336 while (status && timeout && time_left) { 1337 mdelay(delay); 1338 timeout = (timeout > delay) ? timeout - delay : 0; 1339 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 1340 1341 if (status == ICE_ERR_AQ_NO_WORK) 1342 /* lock free, but no work to do */ 1343 break; 1344 1345 if (!status) 1346 /* lock acquired */ 1347 break; 1348 } 1349 if (status && status != ICE_ERR_AQ_NO_WORK) 1350 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 1351 1352 ice_acquire_res_exit: 1353 if (status == ICE_ERR_AQ_NO_WORK) { 1354 if (access == ICE_RES_WRITE) 1355 ice_debug(hw, ICE_DBG_RES, 1356 "resource indicates no work to do.\n"); 1357 else 1358 ice_debug(hw, ICE_DBG_RES, 1359 "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); 1360 } 1361 return status; 1362 } 1363 1364 /** 1365 * ice_release_res 1366 * @hw: pointer to the HW structure 1367 * @res: resource id 1368 * 1369 * This function will release a resource using the proper Admin Command. 1370 */ 1371 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 1372 { 1373 enum ice_status status; 1374 u32 total_delay = 0; 1375 1376 status = ice_aq_release_res(hw, res, 0, NULL); 1377 1378 /* there are some rare cases when trying to release the resource 1379 * results in an admin Q timeout, so handle them correctly 1380 */ 1381 while ((status == ICE_ERR_AQ_TIMEOUT) && 1382 (total_delay < hw->adminq.sq_cmd_timeout)) { 1383 mdelay(1); 1384 status = ice_aq_release_res(hw, res, 0, NULL); 1385 total_delay++; 1386 } 1387 } 1388 1389 /** 1390 * ice_get_guar_num_vsi - determine number of guar VSI for a PF 1391 * @hw: pointer to the hw structure 1392 * 1393 * Determine the number of valid functions by going through the bitmap returned 1394 * from parsing capabilities and use this to calculate the number of VSI per PF. 1395 */ 1396 static u32 ice_get_guar_num_vsi(struct ice_hw *hw) 1397 { 1398 u8 funcs; 1399 1400 #define ICE_CAPS_VALID_FUNCS_M 0xFF 1401 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 1402 ICE_CAPS_VALID_FUNCS_M); 1403 1404 if (!funcs) 1405 return 0; 1406 1407 return ICE_MAX_VSI / funcs; 1408 } 1409 1410 /** 1411 * ice_parse_caps - parse function/device capabilities 1412 * @hw: pointer to the hw struct 1413 * @buf: pointer to a buffer containing function/device capability records 1414 * @cap_count: number of capability records in the list 1415 * @opc: type of capabilities list to parse 1416 * 1417 * Helper function to parse function(0x000a)/device(0x000b) capabilities list. 
1418 */ 1419 static void 1420 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, 1421 enum ice_adminq_opc opc) 1422 { 1423 struct ice_aqc_list_caps_elem *cap_resp; 1424 struct ice_hw_func_caps *func_p = NULL; 1425 struct ice_hw_dev_caps *dev_p = NULL; 1426 struct ice_hw_common_caps *caps; 1427 u32 i; 1428 1429 if (!buf) 1430 return; 1431 1432 cap_resp = (struct ice_aqc_list_caps_elem *)buf; 1433 1434 if (opc == ice_aqc_opc_list_dev_caps) { 1435 dev_p = &hw->dev_caps; 1436 caps = &dev_p->common_cap; 1437 } else if (opc == ice_aqc_opc_list_func_caps) { 1438 func_p = &hw->func_caps; 1439 caps = &func_p->common_cap; 1440 } else { 1441 ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n"); 1442 return; 1443 } 1444 1445 for (i = 0; caps && i < cap_count; i++, cap_resp++) { 1446 u32 logical_id = le32_to_cpu(cap_resp->logical_id); 1447 u32 phys_id = le32_to_cpu(cap_resp->phys_id); 1448 u32 number = le32_to_cpu(cap_resp->number); 1449 u16 cap = le16_to_cpu(cap_resp->cap); 1450 1451 switch (cap) { 1452 case ICE_AQC_CAPS_VALID_FUNCTIONS: 1453 caps->valid_functions = number; 1454 ice_debug(hw, ICE_DBG_INIT, 1455 "HW caps: Valid Functions = %d\n", 1456 caps->valid_functions); 1457 break; 1458 case ICE_AQC_CAPS_SRIOV: 1459 caps->sr_iov_1_1 = (number == 1); 1460 ice_debug(hw, ICE_DBG_INIT, 1461 "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1); 1462 break; 1463 case ICE_AQC_CAPS_VF: 1464 if (dev_p) { 1465 dev_p->num_vfs_exposed = number; 1466 ice_debug(hw, ICE_DBG_INIT, 1467 "HW caps: VFs exposed = %d\n", 1468 dev_p->num_vfs_exposed); 1469 } else if (func_p) { 1470 func_p->num_allocd_vfs = number; 1471 func_p->vf_base_id = logical_id; 1472 ice_debug(hw, ICE_DBG_INIT, 1473 "HW caps: VFs allocated = %d\n", 1474 func_p->num_allocd_vfs); 1475 ice_debug(hw, ICE_DBG_INIT, 1476 "HW caps: VF base_id = %d\n", 1477 func_p->vf_base_id); 1478 } 1479 break; 1480 case ICE_AQC_CAPS_VSI: 1481 if (dev_p) { 1482 dev_p->num_vsi_allocd_to_host = number; 1483 ice_debug(hw, ICE_DBG_INIT, 1484 "HW caps: Dev.VSI cnt = %d\n", 1485 dev_p->num_vsi_allocd_to_host); 1486 } else if (func_p) { 1487 func_p->guar_num_vsi = ice_get_guar_num_vsi(hw); 1488 ice_debug(hw, ICE_DBG_INIT, 1489 "HW caps: Func.VSI cnt = %d\n", 1490 number); 1491 } 1492 break; 1493 case ICE_AQC_CAPS_RSS: 1494 caps->rss_table_size = number; 1495 caps->rss_table_entry_width = logical_id; 1496 ice_debug(hw, ICE_DBG_INIT, 1497 "HW caps: RSS table size = %d\n", 1498 caps->rss_table_size); 1499 ice_debug(hw, ICE_DBG_INIT, 1500 "HW caps: RSS table width = %d\n", 1501 caps->rss_table_entry_width); 1502 break; 1503 case ICE_AQC_CAPS_RXQS: 1504 caps->num_rxq = number; 1505 caps->rxq_first_id = phys_id; 1506 ice_debug(hw, ICE_DBG_INIT, 1507 "HW caps: Num Rx Qs = %d\n", caps->num_rxq); 1508 ice_debug(hw, ICE_DBG_INIT, 1509 "HW caps: Rx first queue ID = %d\n", 1510 caps->rxq_first_id); 1511 break; 1512 case ICE_AQC_CAPS_TXQS: 1513 caps->num_txq = number; 1514 caps->txq_first_id = phys_id; 1515 ice_debug(hw, ICE_DBG_INIT, 1516 "HW caps: Num Tx Qs = %d\n", caps->num_txq); 1517 ice_debug(hw, ICE_DBG_INIT, 1518 "HW caps: Tx first queue ID = %d\n", 1519 caps->txq_first_id); 1520 break; 1521 case ICE_AQC_CAPS_MSIX: 1522 caps->num_msix_vectors = number; 1523 caps->msix_vector_first_id = phys_id; 1524 ice_debug(hw, ICE_DBG_INIT, 1525 "HW caps: MSIX vector count = %d\n", 1526 caps->num_msix_vectors); 1527 ice_debug(hw, ICE_DBG_INIT, 1528 "HW caps: MSIX first vector index = %d\n", 1529 caps->msix_vector_first_id); 1530 break; 1531 case ICE_AQC_CAPS_MAX_MTU: 1532 caps->max_mtu = number; 1533 
if (dev_p) 1534 ice_debug(hw, ICE_DBG_INIT, 1535 "HW caps: Dev.MaxMTU = %d\n", 1536 caps->max_mtu); 1537 else if (func_p) 1538 ice_debug(hw, ICE_DBG_INIT, 1539 "HW caps: func.MaxMTU = %d\n", 1540 caps->max_mtu); 1541 break; 1542 default: 1543 ice_debug(hw, ICE_DBG_INIT, 1544 "HW caps: Unknown capability[%d]: 0x%x\n", i, 1545 cap); 1546 break; 1547 } 1548 } 1549 } 1550 1551 /** 1552 * ice_aq_discover_caps - query function/device capabilities 1553 * @hw: pointer to the hw struct 1554 * @buf: a virtual buffer to hold the capabilities 1555 * @buf_size: Size of the virtual buffer 1556 * @cap_count: cap count needed if AQ err==ENOMEM 1557 * @opc: capabilities type to discover - pass in the command opcode 1558 * @cd: pointer to command details structure or NULL 1559 * 1560 * Get the function(0x000a)/device(0x000b) capabilities description from 1561 * the firmware. 1562 */ 1563 static enum ice_status 1564 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 1565 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 1566 { 1567 struct ice_aqc_list_caps *cmd; 1568 struct ice_aq_desc desc; 1569 enum ice_status status; 1570 1571 cmd = &desc.params.get_cap; 1572 1573 if (opc != ice_aqc_opc_list_func_caps && 1574 opc != ice_aqc_opc_list_dev_caps) 1575 return ICE_ERR_PARAM; 1576 1577 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1578 1579 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 1580 if (!status) 1581 ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc); 1582 else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM) 1583 *cap_count = le32_to_cpu(cmd->count); 1584 return status; 1585 } 1586 1587 /** 1588 * ice_discover_caps - get info about the HW 1589 * @hw: pointer to the hardware structure 1590 * @opc: capabilities type to discover - pass in the command opcode 1591 */ 1592 static enum ice_status ice_discover_caps(struct ice_hw *hw, 1593 enum ice_adminq_opc opc) 1594 { 1595 enum ice_status status; 1596 u32 cap_count; 1597 u16 cbuf_len; 1598 u8 retries; 1599 1600 /* The driver doesn't know how many capabilities the device will return 1601 * so the buffer size required isn't known ahead of time. The driver 1602 * starts with cbuf_len and if this turns out to be insufficient, the 1603 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs. 1604 * The driver then allocates the buffer based on the count and retries 1605 * the operation. So it follows that the retry count is 2. 
1606 */ 1607 #define ICE_GET_CAP_BUF_COUNT 40 1608 #define ICE_GET_CAP_RETRY_COUNT 2 1609 1610 cap_count = ICE_GET_CAP_BUF_COUNT; 1611 retries = ICE_GET_CAP_RETRY_COUNT; 1612 1613 do { 1614 void *cbuf; 1615 1616 cbuf_len = (u16)(cap_count * 1617 sizeof(struct ice_aqc_list_caps_elem)); 1618 cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL); 1619 if (!cbuf) 1620 return ICE_ERR_NO_MEMORY; 1621 1622 status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count, 1623 opc, NULL); 1624 devm_kfree(ice_hw_to_dev(hw), cbuf); 1625 1626 if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM) 1627 break; 1628 1629 /* If ENOMEM is returned, try again with bigger buffer */ 1630 } while (--retries); 1631 1632 return status; 1633 } 1634 1635 /** 1636 * ice_get_caps - get info about the HW 1637 * @hw: pointer to the hardware structure 1638 */ 1639 enum ice_status ice_get_caps(struct ice_hw *hw) 1640 { 1641 enum ice_status status; 1642 1643 status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps); 1644 if (!status) 1645 status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps); 1646 1647 return status; 1648 } 1649 1650 /** 1651 * ice_aq_manage_mac_write - manage MAC address write command 1652 * @hw: pointer to the hw struct 1653 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 1654 * @flags: flags to control write behavior 1655 * @cd: pointer to command details structure or NULL 1656 * 1657 * This function is used to write MAC address to the NVM (0x0108). 1658 */ 1659 enum ice_status 1660 ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags, 1661 struct ice_sq_cd *cd) 1662 { 1663 struct ice_aqc_manage_mac_write *cmd; 1664 struct ice_aq_desc desc; 1665 1666 cmd = &desc.params.mac_write; 1667 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 1668 1669 cmd->flags = flags; 1670 1671 /* Prep values for flags, sah, sal */ 1672 cmd->sah = htons(*((u16 *)mac_addr)); 1673 cmd->sal = htonl(*((u32 *)(mac_addr + 2))); 1674 1675 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1676 } 1677 1678 /** 1679 * ice_aq_clear_pxe_mode 1680 * @hw: pointer to the hw struct 1681 * 1682 * Tell the firmware that the driver is taking over from PXE (0x0110). 1683 */ 1684 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) 1685 { 1686 struct ice_aq_desc desc; 1687 1688 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 1689 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 1690 1691 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1692 } 1693 1694 /** 1695 * ice_clear_pxe_mode - clear pxe operations mode 1696 * @hw: pointer to the hw struct 1697 * 1698 * Make sure all PXE mode settings are cleared, including things 1699 * like descriptor fetch/write-back mode. 1700 */ 1701 void ice_clear_pxe_mode(struct ice_hw *hw) 1702 { 1703 if (ice_check_sq_alive(hw, &hw->adminq)) 1704 ice_aq_clear_pxe_mode(hw); 1705 } 1706 1707 /** 1708 * ice_get_link_speed_based_on_phy_type - returns link speed 1709 * @phy_type_low: lower part of phy_type 1710 * 1711 * This helper function will convert a phy_type_low to its corresponding link 1712 * speed. 1713 * Note: In the structure of phy_type_low, there should be one bit set, as 1714 * this function will convert one phy type to its speed. 
1715 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned 1716 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned 1717 */ 1718 static u16 1719 ice_get_link_speed_based_on_phy_type(u64 phy_type_low) 1720 { 1721 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 1722 1723 switch (phy_type_low) { 1724 case ICE_PHY_TYPE_LOW_100BASE_TX: 1725 case ICE_PHY_TYPE_LOW_100M_SGMII: 1726 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 1727 break; 1728 case ICE_PHY_TYPE_LOW_1000BASE_T: 1729 case ICE_PHY_TYPE_LOW_1000BASE_SX: 1730 case ICE_PHY_TYPE_LOW_1000BASE_LX: 1731 case ICE_PHY_TYPE_LOW_1000BASE_KX: 1732 case ICE_PHY_TYPE_LOW_1G_SGMII: 1733 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 1734 break; 1735 case ICE_PHY_TYPE_LOW_2500BASE_T: 1736 case ICE_PHY_TYPE_LOW_2500BASE_X: 1737 case ICE_PHY_TYPE_LOW_2500BASE_KX: 1738 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 1739 break; 1740 case ICE_PHY_TYPE_LOW_5GBASE_T: 1741 case ICE_PHY_TYPE_LOW_5GBASE_KR: 1742 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 1743 break; 1744 case ICE_PHY_TYPE_LOW_10GBASE_T: 1745 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 1746 case ICE_PHY_TYPE_LOW_10GBASE_SR: 1747 case ICE_PHY_TYPE_LOW_10GBASE_LR: 1748 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 1749 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 1750 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 1751 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 1752 break; 1753 case ICE_PHY_TYPE_LOW_25GBASE_T: 1754 case ICE_PHY_TYPE_LOW_25GBASE_CR: 1755 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 1756 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 1757 case ICE_PHY_TYPE_LOW_25GBASE_SR: 1758 case ICE_PHY_TYPE_LOW_25GBASE_LR: 1759 case ICE_PHY_TYPE_LOW_25GBASE_KR: 1760 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 1761 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 1762 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 1763 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 1764 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 1765 break; 1766 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 1767 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 1768 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 1769 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 1770 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 1771 case ICE_PHY_TYPE_LOW_40G_XLAUI: 1772 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 1773 break; 1774 default: 1775 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 1776 break; 1777 } 1778 1779 return speed_phy_type_low; 1780 } 1781 1782 /** 1783 * ice_update_phy_type 1784 * @phy_type_low: pointer to the lower part of phy_type 1785 * @link_speeds_bitmap: targeted link speeds bitmap 1786 * 1787 * Note: For the link_speeds_bitmap structure, you can check it at 1788 * [ice_aqc_get_link_status->link_speed]. Caller can pass in 1789 * link_speeds_bitmap include multiple speeds. 1790 * 1791 * The value of phy_type_low will present a certain link speed. This helper 1792 * function will turn on bits in the phy_type_low based on the value of 1793 * link_speeds_bitmap input parameter. 
1794 */ 1795 void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap) 1796 { 1797 u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN; 1798 u64 pt_low; 1799 int index; 1800 1801 /* We first check with low part of phy_type */ 1802 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 1803 pt_low = BIT_ULL(index); 1804 speed = ice_get_link_speed_based_on_phy_type(pt_low); 1805 1806 if (link_speeds_bitmap & speed) 1807 *phy_type_low |= BIT_ULL(index); 1808 } 1809 } 1810 1811 /** 1812 * ice_aq_set_phy_cfg 1813 * @hw: pointer to the hw struct 1814 * @lport: logical port number 1815 * @cfg: structure with PHY configuration data to be set 1816 * @cd: pointer to command details structure or NULL 1817 * 1818 * Set the various PHY configuration parameters supported on the Port. 1819 * One or more of the Set PHY config parameters may be ignored in an MFP 1820 * mode as the PF may not have the privilege to set some of the PHY Config 1821 * parameters. This status will be indicated by the command response (0x0601). 1822 */ 1823 enum ice_status 1824 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, 1825 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 1826 { 1827 struct ice_aq_desc desc; 1828 1829 if (!cfg) 1830 return ICE_ERR_PARAM; 1831 1832 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 1833 desc.params.set_phy.lport_num = lport; 1834 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1835 1836 return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 1837 } 1838 1839 /** 1840 * ice_update_link_info - update status of the HW network link 1841 * @pi: port info structure of the interested logical port 1842 */ 1843 enum ice_status ice_update_link_info(struct ice_port_info *pi) 1844 { 1845 struct ice_aqc_get_phy_caps_data *pcaps; 1846 struct ice_phy_info *phy_info; 1847 enum ice_status status; 1848 struct ice_hw *hw; 1849 1850 if (!pi) 1851 return ICE_ERR_PARAM; 1852 1853 hw = pi->hw; 1854 1855 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 1856 if (!pcaps) 1857 return ICE_ERR_NO_MEMORY; 1858 1859 phy_info = &pi->phy; 1860 status = ice_aq_get_link_info(pi, true, NULL, NULL); 1861 if (status) 1862 goto out; 1863 1864 if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 1865 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 1866 pcaps, NULL); 1867 if (status) 1868 goto out; 1869 1870 memcpy(phy_info->link_info.module_type, &pcaps->module_type, 1871 sizeof(phy_info->link_info.module_type)); 1872 } 1873 out: 1874 devm_kfree(ice_hw_to_dev(hw), pcaps); 1875 return status; 1876 } 1877 1878 /** 1879 * ice_set_fc 1880 * @pi: port information structure 1881 * @aq_failures: pointer to status code, specific to ice_set_fc routine 1882 * @ena_auto_link_update: enable automatic link update 1883 * 1884 * Set the requested flow control mode. 
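 *
 * A minimal sketch of a hypothetical caller (not taken from this file):
 *
 *	u8 aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *
 * On failure, aq_failures reports which step failed: ICE_SET_FC_AQ_FAIL_GET,
 * ICE_SET_FC_AQ_FAIL_SET or ICE_SET_FC_AQ_FAIL_UPDATE. Passing true for
 * ena_auto_link_update requests an automatic link restart so the new pause
 * settings take effect.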
1885 */ 1886 enum ice_status 1887 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 1888 { 1889 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 1890 struct ice_aqc_get_phy_caps_data *pcaps; 1891 enum ice_status status; 1892 u8 pause_mask = 0x0; 1893 struct ice_hw *hw; 1894 1895 if (!pi) 1896 return ICE_ERR_PARAM; 1897 hw = pi->hw; 1898 *aq_failures = ICE_SET_FC_AQ_FAIL_NONE; 1899 1900 switch (pi->fc.req_mode) { 1901 case ICE_FC_FULL: 1902 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 1903 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 1904 break; 1905 case ICE_FC_RX_PAUSE: 1906 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 1907 break; 1908 case ICE_FC_TX_PAUSE: 1909 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 1910 break; 1911 default: 1912 break; 1913 } 1914 1915 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 1916 if (!pcaps) 1917 return ICE_ERR_NO_MEMORY; 1918 1919 /* Get the current phy config */ 1920 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, 1921 NULL); 1922 if (status) { 1923 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 1924 goto out; 1925 } 1926 1927 /* clear the old pause settings */ 1928 cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 1929 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 1930 /* set the new capabilities */ 1931 cfg.caps |= pause_mask; 1932 /* If the capabilities have changed, then set the new config */ 1933 if (cfg.caps != pcaps->caps) { 1934 int retry_count, retry_max = 10; 1935 1936 /* Auto restart link so settings take effect */ 1937 if (ena_auto_link_update) 1938 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1939 /* Copy over all the old settings */ 1940 cfg.phy_type_low = pcaps->phy_type_low; 1941 cfg.low_power_ctrl = pcaps->low_power_ctrl; 1942 cfg.eee_cap = pcaps->eee_cap; 1943 cfg.eeer_value = pcaps->eeer_value; 1944 cfg.link_fec_opt = pcaps->link_fec_options; 1945 1946 status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL); 1947 if (status) { 1948 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 1949 goto out; 1950 } 1951 1952 /* Update the link info 1953 * It sometimes takes a really long time for link to 1954 * come back from the atomic reset. Thus, we wait a 1955 * little bit. 1956 */ 1957 for (retry_count = 0; retry_count < retry_max; retry_count++) { 1958 status = ice_update_link_info(pi); 1959 1960 if (!status) 1961 break; 1962 1963 mdelay(100); 1964 } 1965 1966 if (status) 1967 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 1968 } 1969 1970 out: 1971 devm_kfree(ice_hw_to_dev(hw), pcaps); 1972 return status; 1973 } 1974 1975 /** 1976 * ice_get_link_status - get status of the HW network link 1977 * @pi: port information structure 1978 * @link_up: pointer to bool (true/false = linkup/linkdown) 1979 * 1980 * Variable link_up is true if link is up, false if link is down. 1981 * The variable link_up is invalid if status is non zero. 
As a 1982 * result of this call, link status reporting becomes enabled 1983 */ 1984 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) 1985 { 1986 struct ice_phy_info *phy_info; 1987 enum ice_status status = 0; 1988 1989 if (!pi || !link_up) 1990 return ICE_ERR_PARAM; 1991 1992 phy_info = &pi->phy; 1993 1994 if (phy_info->get_link_info) { 1995 status = ice_update_link_info(pi); 1996 1997 if (status) 1998 ice_debug(pi->hw, ICE_DBG_LINK, 1999 "get link status error, status = %d\n", 2000 status); 2001 } 2002 2003 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 2004 2005 return status; 2006 } 2007 2008 /** 2009 * ice_aq_set_link_restart_an 2010 * @pi: pointer to the port information structure 2011 * @ena_link: if true: enable link, if false: disable link 2012 * @cd: pointer to command details structure or NULL 2013 * 2014 * Sets up the link and restarts the Auto-Negotiation over the link. 2015 */ 2016 enum ice_status 2017 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 2018 struct ice_sq_cd *cd) 2019 { 2020 struct ice_aqc_restart_an *cmd; 2021 struct ice_aq_desc desc; 2022 2023 cmd = &desc.params.restart_an; 2024 2025 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 2026 2027 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 2028 cmd->lport_num = pi->lport; 2029 if (ena_link) 2030 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 2031 else 2032 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 2033 2034 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 2035 } 2036 2037 /** 2038 * __ice_aq_get_set_rss_lut 2039 * @hw: pointer to the hardware structure 2040 * @vsi_id: VSI FW index 2041 * @lut_type: LUT table type 2042 * @lut: pointer to the LUT buffer provided by the caller 2043 * @lut_size: size of the LUT buffer 2044 * @glob_lut_idx: global LUT index 2045 * @set: set true to set the table, false to get the table 2046 * 2047 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 2048 */ 2049 static enum ice_status 2050 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, 2051 u16 lut_size, u8 glob_lut_idx, bool set) 2052 { 2053 struct ice_aqc_get_set_rss_lut *cmd_resp; 2054 struct ice_aq_desc desc; 2055 enum ice_status status; 2056 u16 flags = 0; 2057 2058 cmd_resp = &desc.params.get_set_rss_lut; 2059 2060 if (set) { 2061 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); 2062 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2063 } else { 2064 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); 2065 } 2066 2067 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 2068 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & 2069 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | 2070 ICE_AQC_GSET_RSS_LUT_VSI_VALID); 2071 2072 switch (lut_type) { 2073 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: 2074 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: 2075 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: 2076 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & 2077 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); 2078 break; 2079 default: 2080 status = ICE_ERR_PARAM; 2081 goto ice_aq_get_set_rss_lut_exit; 2082 } 2083 2084 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { 2085 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & 2086 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); 2087 2088 if (!set) 2089 goto ice_aq_get_set_rss_lut_send; 2090 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 2091 if (!set) 2092 goto ice_aq_get_set_rss_lut_send; 2093 } else { 2094 goto ice_aq_get_set_rss_lut_send; 2095 } 2096 2097 /* LUT size 
is only valid for Global and PF table types */ 2098 switch (lut_size) { 2099 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: 2100 break; 2101 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: 2102 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << 2103 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 2104 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 2105 break; 2106 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: 2107 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 2108 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << 2109 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 2110 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 2111 break; 2112 } 2113 /* fall-through */ 2114 default: 2115 status = ICE_ERR_PARAM; 2116 goto ice_aq_get_set_rss_lut_exit; 2117 } 2118 2119 ice_aq_get_set_rss_lut_send: 2120 cmd_resp->flags = cpu_to_le16(flags); 2121 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 2122 2123 ice_aq_get_set_rss_lut_exit: 2124 return status; 2125 } 2126 2127 /** 2128 * ice_aq_get_rss_lut 2129 * @hw: pointer to the hardware structure 2130 * @vsi_handle: software VSI handle 2131 * @lut_type: LUT table type 2132 * @lut: pointer to the LUT buffer provided by the caller 2133 * @lut_size: size of the LUT buffer 2134 * 2135 * get the RSS lookup table, PF or VSI type 2136 */ 2137 enum ice_status 2138 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, 2139 u8 *lut, u16 lut_size) 2140 { 2141 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 2142 return ICE_ERR_PARAM; 2143 2144 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2145 lut_type, lut, lut_size, 0, false); 2146 } 2147 2148 /** 2149 * ice_aq_set_rss_lut 2150 * @hw: pointer to the hardware structure 2151 * @vsi_handle: software VSI handle 2152 * @lut_type: LUT table type 2153 * @lut: pointer to the LUT buffer provided by the caller 2154 * @lut_size: size of the LUT buffer 2155 * 2156 * set the RSS lookup table, PF or VSI type 2157 */ 2158 enum ice_status 2159 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, 2160 u8 *lut, u16 lut_size) 2161 { 2162 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 2163 return ICE_ERR_PARAM; 2164 2165 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2166 lut_type, lut, lut_size, 0, true); 2167 } 2168 2169 /** 2170 * __ice_aq_get_set_rss_key 2171 * @hw: pointer to the hw struct 2172 * @vsi_id: VSI FW index 2173 * @key: pointer to key info struct 2174 * @set: set true to set the key, false to get the key 2175 * 2176 * get (0x0B04) or set (0x0B02) the RSS key per VSI 2177 */ 2178 static enum 2179 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 2180 struct ice_aqc_get_set_rss_keys *key, 2181 bool set) 2182 { 2183 struct ice_aqc_get_set_rss_key *cmd_resp; 2184 u16 key_size = sizeof(*key); 2185 struct ice_aq_desc desc; 2186 2187 cmd_resp = &desc.params.get_set_rss_key; 2188 2189 if (set) { 2190 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 2191 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2192 } else { 2193 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 2194 } 2195 2196 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 2197 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & 2198 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | 2199 ICE_AQC_GSET_RSS_KEY_VSI_VALID); 2200 2201 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 2202 } 2203 2204 /** 2205 * ice_aq_get_rss_key 2206 * @hw: pointer to the hw struct 2207 * @vsi_handle: software VSI handle 2208 * @key: pointer to key info struct 2209 * 2210 * get the RSS key per VSI 2211 */ 2212 enum ice_status 2213 
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 2214 struct ice_aqc_get_set_rss_keys *key) 2215 { 2216 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 2217 return ICE_ERR_PARAM; 2218 2219 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2220 key, false); 2221 } 2222 2223 /** 2224 * ice_aq_set_rss_key 2225 * @hw: pointer to the hw struct 2226 * @vsi_handle: software VSI handle 2227 * @keys: pointer to key info struct 2228 * 2229 * set the RSS key per VSI 2230 */ 2231 enum ice_status 2232 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 2233 struct ice_aqc_get_set_rss_keys *keys) 2234 { 2235 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 2236 return ICE_ERR_PARAM; 2237 2238 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2239 keys, true); 2240 } 2241 2242 /** 2243 * ice_aq_add_lan_txq 2244 * @hw: pointer to the hardware structure 2245 * @num_qgrps: Number of added queue groups 2246 * @qg_list: list of queue groups to be added 2247 * @buf_size: size of buffer for indirect command 2248 * @cd: pointer to command details structure or NULL 2249 * 2250 * Add Tx LAN queue (0x0C30) 2251 * 2252 * NOTE: 2253 * Prior to calling add Tx LAN queue: 2254 * Initialize the following as part of the Tx queue context: 2255 * Completion queue ID if the queue uses Completion queue, Quanta profile, 2256 * Cache profile and Packet shaper profile. 2257 * 2258 * After add Tx LAN queue AQ command is completed: 2259 * Interrupts should be associated with specific queues, 2260 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 2261 * flow. 2262 */ 2263 static enum ice_status 2264 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 2265 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 2266 struct ice_sq_cd *cd) 2267 { 2268 u16 i, sum_header_size, sum_q_size = 0; 2269 struct ice_aqc_add_tx_qgrp *list; 2270 struct ice_aqc_add_txqs *cmd; 2271 struct ice_aq_desc desc; 2272 2273 cmd = &desc.params.add_txqs; 2274 2275 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 2276 2277 if (!qg_list) 2278 return ICE_ERR_PARAM; 2279 2280 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 2281 return ICE_ERR_PARAM; 2282 2283 sum_header_size = num_qgrps * 2284 (sizeof(*qg_list) - sizeof(*qg_list->txqs)); 2285 2286 list = qg_list; 2287 for (i = 0; i < num_qgrps; i++) { 2288 struct ice_aqc_add_txqs_perq *q = list->txqs; 2289 2290 sum_q_size += list->num_txqs * sizeof(*q); 2291 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs); 2292 } 2293 2294 if (buf_size != (sum_header_size + sum_q_size)) 2295 return ICE_ERR_PARAM; 2296 2297 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2298 2299 cmd->num_qgrps = num_qgrps; 2300 2301 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 2302 } 2303 2304 /** 2305 * ice_aq_dis_lan_txq 2306 * @hw: pointer to the hardware structure 2307 * @num_qgrps: number of groups in the list 2308 * @qg_list: the list of groups to disable 2309 * @buf_size: the total size of the qg_list buffer in bytes 2310 * @rst_src: if called due to reset, specifies the RST source 2311 * @vmvf_num: the relative VM or VF number that is undergoing the reset 2312 * @cd: pointer to command details structure or NULL 2313 * 2314 * Disable LAN Tx queue (0x0C31) 2315 */ 2316 static enum ice_status 2317 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 2318 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 2319 enum ice_disq_rst_src rst_src, u16 vmvf_num, 2320 struct ice_sq_cd *cd) 2321 { 2322 struct ice_aqc_dis_txqs *cmd; 2323 struct ice_aq_desc desc; 
2324 u16 i, sz = 0; 2325 2326 cmd = &desc.params.dis_txqs; 2327 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 2328 2329 /* qg_list can be NULL only in VM/VF reset flow */ 2330 if (!qg_list && !rst_src) 2331 return ICE_ERR_PARAM; 2332 2333 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 2334 return ICE_ERR_PARAM; 2335 2336 cmd->num_entries = num_qgrps; 2337 2338 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & 2339 ICE_AQC_Q_DIS_TIMEOUT_M); 2340 2341 switch (rst_src) { 2342 case ICE_VM_RESET: 2343 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 2344 cmd->vmvf_and_timeout |= 2345 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); 2346 break; 2347 case ICE_VF_RESET: 2348 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 2349 /* In this case, FW expects vmvf_num to be absolute VF id */ 2350 cmd->vmvf_and_timeout |= 2351 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & 2352 ICE_AQC_Q_DIS_VMVF_NUM_M); 2353 break; 2354 case ICE_NO_RESET: 2355 default: 2356 break; 2357 } 2358 2359 /* If no queue group info, we are in a reset flow. Issue the AQ */ 2360 if (!qg_list) 2361 goto do_aq; 2362 2363 /* set RD bit to indicate that command buffer is provided by the driver 2364 * and it needs to be read by the firmware 2365 */ 2366 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2367 2368 for (i = 0; i < num_qgrps; ++i) { 2369 /* Calculate the size taken up by the queue IDs in this group */ 2370 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id); 2371 2372 /* Add the size of the group header */ 2373 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id); 2374 2375 /* If the num of queues is even, add 2 bytes of padding */ 2376 if ((qg_list[i].num_qs % 2) == 0) 2377 sz += 2; 2378 } 2379 2380 if (buf_size != sz) 2381 return ICE_ERR_PARAM; 2382 2383 do_aq: 2384 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 2385 } 2386 2387 /* End of FW Admin Queue command wrappers */ 2388 2389 /** 2390 * ice_write_byte - write a byte to a packed context structure 2391 * @src_ctx: the context structure to read from 2392 * @dest_ctx: the context to be written to 2393 * @ce_info: a description of the struct to be filled 2394 */ 2395 static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx, 2396 const struct ice_ctx_ele *ce_info) 2397 { 2398 u8 src_byte, dest_byte, mask; 2399 u8 *from, *dest; 2400 u16 shift_width; 2401 2402 /* copy from the next struct field */ 2403 from = src_ctx + ce_info->offset; 2404 2405 /* prepare the bits and mask */ 2406 shift_width = ce_info->lsb % 8; 2407 mask = (u8)(BIT(ce_info->width) - 1); 2408 2409 src_byte = *from; 2410 src_byte &= mask; 2411 2412 /* shift to correct alignment */ 2413 mask <<= shift_width; 2414 src_byte <<= shift_width; 2415 2416 /* get the current bits from the target bit string */ 2417 dest = dest_ctx + (ce_info->lsb / 8); 2418 2419 memcpy(&dest_byte, dest, sizeof(dest_byte)); 2420 2421 dest_byte &= ~mask; /* get the bits not changing */ 2422 dest_byte |= src_byte; /* add in the new bits */ 2423 2424 /* put it all back */ 2425 memcpy(dest, &dest_byte, sizeof(dest_byte)); 2426 } 2427 2428 /** 2429 * ice_write_word - write a word to a packed context structure 2430 * @src_ctx: the context structure to read from 2431 * @dest_ctx: the context to be written to 2432 * @ce_info: a description of the struct to be filled 2433 */ 2434 static void ice_write_word(u8 *src_ctx, u8 *dest_ctx, 2435 const struct ice_ctx_ele *ce_info) 2436 { 2437 u16 src_word, mask; 2438 __le16 dest_word; 2439 u8 *from, *dest; 2440 u16 shift_width; 2441 2442 /* copy from the next struct field */ 2443 
from = src_ctx + ce_info->offset; 2444 2445 /* prepare the bits and mask */ 2446 shift_width = ce_info->lsb % 8; 2447 mask = BIT(ce_info->width) - 1; 2448 2449 /* don't swizzle the bits until after the mask because the mask bits 2450 * will be in a different bit position on big endian machines 2451 */ 2452 src_word = *(u16 *)from; 2453 src_word &= mask; 2454 2455 /* shift to correct alignment */ 2456 mask <<= shift_width; 2457 src_word <<= shift_width; 2458 2459 /* get the current bits from the target bit string */ 2460 dest = dest_ctx + (ce_info->lsb / 8); 2461 2462 memcpy(&dest_word, dest, sizeof(dest_word)); 2463 2464 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 2465 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 2466 2467 /* put it all back */ 2468 memcpy(dest, &dest_word, sizeof(dest_word)); 2469 } 2470 2471 /** 2472 * ice_write_dword - write a dword to a packed context structure 2473 * @src_ctx: the context structure to read from 2474 * @dest_ctx: the context to be written to 2475 * @ce_info: a description of the struct to be filled 2476 */ 2477 static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx, 2478 const struct ice_ctx_ele *ce_info) 2479 { 2480 u32 src_dword, mask; 2481 __le32 dest_dword; 2482 u8 *from, *dest; 2483 u16 shift_width; 2484 2485 /* copy from the next struct field */ 2486 from = src_ctx + ce_info->offset; 2487 2488 /* prepare the bits and mask */ 2489 shift_width = ce_info->lsb % 8; 2490 2491 /* if the field width is exactly 32 on an x86 machine, then the shift 2492 * operation will not work because the SHL instructions count is masked 2493 * to 5 bits so the shift will do nothing 2494 */ 2495 if (ce_info->width < 32) 2496 mask = BIT(ce_info->width) - 1; 2497 else 2498 mask = (u32)~0; 2499 2500 /* don't swizzle the bits until after the mask because the mask bits 2501 * will be in a different bit position on big endian machines 2502 */ 2503 src_dword = *(u32 *)from; 2504 src_dword &= mask; 2505 2506 /* shift to correct alignment */ 2507 mask <<= shift_width; 2508 src_dword <<= shift_width; 2509 2510 /* get the current bits from the target bit string */ 2511 dest = dest_ctx + (ce_info->lsb / 8); 2512 2513 memcpy(&dest_dword, dest, sizeof(dest_dword)); 2514 2515 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 2516 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 2517 2518 /* put it all back */ 2519 memcpy(dest, &dest_dword, sizeof(dest_dword)); 2520 } 2521 2522 /** 2523 * ice_write_qword - write a qword to a packed context structure 2524 * @src_ctx: the context structure to read from 2525 * @dest_ctx: the context to be written to 2526 * @ce_info: a description of the struct to be filled 2527 */ 2528 static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx, 2529 const struct ice_ctx_ele *ce_info) 2530 { 2531 u64 src_qword, mask; 2532 __le64 dest_qword; 2533 u8 *from, *dest; 2534 u16 shift_width; 2535 2536 /* copy from the next struct field */ 2537 from = src_ctx + ce_info->offset; 2538 2539 /* prepare the bits and mask */ 2540 shift_width = ce_info->lsb % 8; 2541 2542 /* if the field width is exactly 64 on an x86 machine, then the shift 2543 * operation will not work because the SHL instructions count is masked 2544 * to 6 bits so the shift will do nothing 2545 */ 2546 if (ce_info->width < 64) 2547 mask = BIT_ULL(ce_info->width) - 1; 2548 else 2549 mask = (u64)~0; 2550 2551 /* don't swizzle the bits until after the mask because the mask bits 2552 * will be in a different bit position on big endian 
machines 2553 */ 2554 src_qword = *(u64 *)from; 2555 src_qword &= mask; 2556 2557 /* shift to correct alignment */ 2558 mask <<= shift_width; 2559 src_qword <<= shift_width; 2560 2561 /* get the current bits from the target bit string */ 2562 dest = dest_ctx + (ce_info->lsb / 8); 2563 2564 memcpy(&dest_qword, dest, sizeof(dest_qword)); 2565 2566 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 2567 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 2568 2569 /* put it all back */ 2570 memcpy(dest, &dest_qword, sizeof(dest_qword)); 2571 } 2572 2573 /** 2574 * ice_set_ctx - set context bits in packed structure 2575 * @src_ctx: pointer to a generic non-packed context structure 2576 * @dest_ctx: pointer to memory for the packed structure 2577 * @ce_info: a description of the structure to be transformed 2578 */ 2579 enum ice_status 2580 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 2581 { 2582 int f; 2583 2584 for (f = 0; ce_info[f].width; f++) { 2585 /* We have to deal with each element of the FW response 2586 * using the correct size so that we are correct regardless 2587 * of the endianness of the machine. 2588 */ 2589 switch (ce_info[f].size_of) { 2590 case sizeof(u8): 2591 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 2592 break; 2593 case sizeof(u16): 2594 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 2595 break; 2596 case sizeof(u32): 2597 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 2598 break; 2599 case sizeof(u64): 2600 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 2601 break; 2602 default: 2603 return ICE_ERR_INVAL_SIZE; 2604 } 2605 } 2606 2607 return 0; 2608 } 2609 2610 /** 2611 * ice_ena_vsi_txq 2612 * @pi: port information structure 2613 * @vsi_handle: software VSI handle 2614 * @tc: TC number 2615 * @num_qgrps: Number of added queue groups 2616 * @buf: list of queue groups to be added 2617 * @buf_size: size of buffer for indirect command 2618 * @cd: pointer to command details structure or NULL 2619 * 2620 * This function adds one LAN Tx queue 2621 */ 2622 enum ice_status 2623 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, 2624 struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 2625 struct ice_sq_cd *cd) 2626 { 2627 struct ice_aqc_txsched_elem_data node = { 0 }; 2628 struct ice_sched_node *parent; 2629 enum ice_status status; 2630 struct ice_hw *hw; 2631 2632 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 2633 return ICE_ERR_CFG; 2634 2635 if (num_qgrps > 1 || buf->num_txqs > 1) 2636 return ICE_ERR_MAX_LIMIT; 2637 2638 hw = pi->hw; 2639 2640 if (!ice_is_vsi_valid(hw, vsi_handle)) 2641 return ICE_ERR_PARAM; 2642 2643 mutex_lock(&pi->sched_lock); 2644 2645 /* find a parent node */ 2646 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 2647 ICE_SCHED_NODE_OWNER_LAN); 2648 if (!parent) { 2649 status = ICE_ERR_PARAM; 2650 goto ena_txq_exit; 2651 } 2652 2653 buf->parent_teid = parent->info.node_teid; 2654 node.parent_teid = parent->info.node_teid; 2655 /* Mark the values in the "generic" section as valid. The default 2656 * value in the "generic" section is zero. This means that: 2657 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 2658 * - 0 priority among siblings, indicated by Bit 1-3. 2659 * - WFQ, indicated by Bit 4. 2660 * - 0 Adjustment value is used in PSM credit update flow, indicated by 2661 * Bit 5-6. 2662 * - Bit 7 is reserved.
2663 * Without setting the generic section as valid in valid_sections, the 2664 * Admin Q command will fail with error code ICE_AQ_RC_EINVAL. 2665 */ 2666 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC; 2667 2668 /* add the LAN Tx queue */ 2669 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 2670 if (status) 2671 goto ena_txq_exit; 2672 2673 node.node_teid = buf->txqs[0].q_teid; 2674 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 2675 2676 /* add a leaf node into the scheduler tree queue layer */ 2677 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); 2678 2679 ena_txq_exit: 2680 mutex_unlock(&pi->sched_lock); 2681 return status; 2682 } 2683 2684 /** 2685 * ice_dis_vsi_txq 2686 * @pi: port information structure 2687 * @num_queues: number of queues 2688 * @q_ids: pointer to the q_id array 2689 * @q_teids: pointer to queue node teids 2690 * @rst_src: if called due to reset, specifies the RST source 2691 * @vmvf_num: the relative VM or VF number that is undergoing the reset 2692 * @cd: pointer to command details structure or NULL 2693 * 2694 * This function removes queues and their corresponding nodes from the SW DB 2695 */ 2696 enum ice_status 2697 ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, 2698 u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, 2699 struct ice_sq_cd *cd) 2700 { 2701 enum ice_status status = ICE_ERR_DOES_NOT_EXIST; 2702 struct ice_aqc_dis_txq_item qg_list; 2703 u16 i; 2704 2705 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 2706 return ICE_ERR_CFG; 2707 2708 /* if the queue is already disabled but the disable queue command still 2709 * has to be sent to complete the VF reset, then call ice_aq_dis_lan_txq 2710 * without any queue information 2711 */ 2712 2713 if (!num_queues && rst_src) 2714 return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num, 2715 NULL); 2716 2717 mutex_lock(&pi->sched_lock); 2718 2719 for (i = 0; i < num_queues; i++) { 2720 struct ice_sched_node *node; 2721 2722 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 2723 if (!node) 2724 continue; 2725 qg_list.parent_teid = node->info.parent_teid; 2726 qg_list.num_qs = 1; 2727 qg_list.q_id[0] = cpu_to_le16(q_ids[i]); 2728 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, 2729 sizeof(qg_list), rst_src, vmvf_num, 2730 cd); 2731 2732 if (status) 2733 break; 2734 ice_free_sched_node(pi, node); 2735 } 2736 mutex_unlock(&pi->sched_lock); 2737 return status; 2738 } 2739 2740 /** 2741 * ice_cfg_vsi_qs - configure the new/existing VSI queues 2742 * @pi: port information structure 2743 * @vsi_handle: software VSI handle 2744 * @tc_bitmap: TC bitmap 2745 * @maxqs: max queues array per TC 2746 * @owner: LAN or RDMA 2747 * 2748 * This function adds/updates the VSI queues per TC.
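 *
 * As a sketch only (the queue count is hypothetical), a caller going through
 * the ice_cfg_vsi_lan() wrapper below might use:
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 16 };
 *
 *	status = ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
 *
 * Each TC whose scheduler node exists is passed to ice_sched_cfg_vsi() with
 * an enable flag taken from tc_bitmap; TCs without a node are skipped.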
2749 */ 2750 static enum ice_status 2751 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 2752 u16 *maxqs, u8 owner) 2753 { 2754 enum ice_status status = 0; 2755 u8 i; 2756 2757 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 2758 return ICE_ERR_CFG; 2759 2760 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 2761 return ICE_ERR_PARAM; 2762 2763 mutex_lock(&pi->sched_lock); 2764 2765 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { 2766 /* configuration is possible only if TC node is present */ 2767 if (!ice_sched_get_tc_node(pi, i)) 2768 continue; 2769 2770 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 2771 ice_is_tc_ena(tc_bitmap, i)); 2772 if (status) 2773 break; 2774 } 2775 2776 mutex_unlock(&pi->sched_lock); 2777 return status; 2778 } 2779 2780 /** 2781 * ice_cfg_vsi_lan - configure VSI LAN queues 2782 * @pi: port information structure 2783 * @vsi_handle: software VSI handle 2784 * @tc_bitmap: TC bitmap 2785 * @max_lanqs: max LAN queues array per TC 2786 * 2787 * This function adds/updates the VSI LAN queues per TC. 2788 */ 2789 enum ice_status 2790 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 2791 u16 *max_lanqs) 2792 { 2793 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 2794 ICE_SCHED_NODE_OWNER_LAN); 2795 } 2796 2797 /** 2798 * ice_replay_pre_init - replay pre-initialization 2799 * @hw: pointer to the hw struct 2800 * 2801 * Initializes required config data for VSI, FD, ACL, and RSS before replay. 2802 */ 2803 static enum ice_status ice_replay_pre_init(struct ice_hw *hw) 2804 { 2805 struct ice_switch_info *sw = hw->switch_info; 2806 u8 i; 2807 2808 /* Delete old entries from replay filter list head if there are any */ 2809 ice_rm_all_sw_replay_rule_info(hw); 2810 /* At the start of replay, move entries into the replay_rules list; this 2811 * allows adding rule entries back to the filt_rules list, which is the 2812 * operational list. 2813 */ 2814 for (i = 0; i < ICE_SW_LKUP_LAST; i++) 2815 list_replace_init(&sw->recp_list[i].filt_rules, 2816 &sw->recp_list[i].filt_replay_rules); 2817 2818 return 0; 2819 } 2820 2821 /** 2822 * ice_replay_vsi - replay VSI configuration 2823 * @hw: pointer to the hw struct 2824 * @vsi_handle: driver VSI handle 2825 * 2826 * Restore all VSI configuration after reset. It is required to call this 2827 * function with the main VSI first. 2828 */ 2829 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) 2830 { 2831 enum ice_status status; 2832 2833 if (!ice_is_vsi_valid(hw, vsi_handle)) 2834 return ICE_ERR_PARAM; 2835 2836 /* Replay pre-initialization if there is any */ 2837 if (vsi_handle == ICE_MAIN_VSI_HANDLE) { 2838 status = ice_replay_pre_init(hw); 2839 if (status) 2840 return status; 2841 } 2842 2843 /* Replay all filters for this VSI */ 2844 status = ice_replay_vsi_all_fltr(hw, vsi_handle); 2845 return status; 2846 } 2847 2848 /** 2849 * ice_replay_post - post replay configuration cleanup 2850 * @hw: pointer to the hw struct 2851 * 2852 * Post replay cleanup.
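 *
 * A sketch of the expected call order, inferred from the helpers above
 * (illustrative, not a formal contract):
 *
 *	ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	ice_replay_vsi(hw, vsi_handle);		(repeat for each remaining VSI)
 *	ice_replay_post(hw);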
2853 */ 2854 void ice_replay_post(struct ice_hw *hw) 2855 { 2856 /* Delete old entries from replay filter list head */ 2857 ice_rm_all_sw_replay_rule_info(hw); 2858 } 2859 2860 /** 2861 * ice_stat_update40 - read 40 bit stat from the chip and update stat values 2862 * @hw: ptr to the hardware info 2863 * @hireg: high 32 bit HW register to read from 2864 * @loreg: low 32 bit HW register to read from 2865 * @prev_stat_loaded: bool to specify if previous stats are loaded 2866 * @prev_stat: ptr to previous loaded stat value 2867 * @cur_stat: ptr to current stat value 2868 */ 2869 void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, 2870 bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) 2871 { 2872 u64 new_data; 2873 2874 new_data = rd32(hw, loreg); 2875 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; 2876 2877 /* device stats are not reset at PFR, they likely will not be zeroed 2878 * when the driver starts. So save the first values read and use them as 2879 * offsets to be subtracted from the raw values in order to report stats 2880 * that count from zero. 2881 */ 2882 if (!prev_stat_loaded) 2883 *prev_stat = new_data; 2884 if (new_data >= *prev_stat) 2885 *cur_stat = new_data - *prev_stat; 2886 else 2887 /* to manage the potential roll-over */ 2888 *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; 2889 *cur_stat &= 0xFFFFFFFFFFULL; 2890 } 2891 2892 /** 2893 * ice_stat_update32 - read 32 bit stat from the chip and update stat values 2894 * @hw: ptr to the hardware info 2895 * @reg: HW register to read from 2896 * @prev_stat_loaded: bool to specify if previous stats are loaded 2897 * @prev_stat: ptr to previous loaded stat value 2898 * @cur_stat: ptr to current stat value 2899 */ 2900 void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 2901 u64 *prev_stat, u64 *cur_stat) 2902 { 2903 u32 new_data; 2904 2905 new_data = rd32(hw, reg); 2906 2907 /* device stats are not reset at PFR, they likely will not be zeroed 2908 * when the driver starts. So save the first values read and use them as 2909 * offsets to be subtracted from the raw values in order to report stats 2910 * that count from zero. 2911 */ 2912 if (!prev_stat_loaded) 2913 *prev_stat = new_data; 2914 if (new_data >= *prev_stat) 2915 *cur_stat = new_data - *prev_stat; 2916 else 2917 /* to manage the potential roll-over */ 2918 *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; 2919 } 2920
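/* Usage sketch for the statistics helpers above. The register macros and the
 * prev/cur stat storage shown here are illustrative assumptions, not
 * definitions from this file:
 *
 *	ice_stat_update40(hw, GLPRT_GORCH(port), GLPRT_GORCL(port),
 *			  prev_loaded, &prev_stats->rx_bytes,
 *			  &cur_stats->rx_bytes);
 *	ice_stat_update32(hw, GLPRT_RLEC(port), prev_loaded,
 *			  &prev_stats->rx_len_errors,
 *			  &cur_stats->rx_len_errors);
 *
 * The first read after a driver load primes *prev_stat; later reads report
 * deltas, with the 40 bit variant handling counter roll-over at BIT_ULL(40).
 */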