// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#define ICE_PF_RESET_WAIT_COUNT	200

#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;
	return 0;
}

/**
 * ice_dev_onetime_setup - Temporary HW/FW workarounds
 * @hw: pointer to the HW structure
 *
 * This function provides temporary workarounds for certain issues
 * that are expected to be fixed in the HW/FW.
 */
void ice_dev_onetime_setup(struct ice_hw *hw)
{
	/* configure Rx - set non pxe mode */
	wr32(hw, GLLAN_RCTL_0, 0x1);

#define MBX_PF_VT_PFALLOC	0x00231E80
	/* set VFs per PF */
	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_aq_discover_caps is expected to be called before this function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);

	return status;
}
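
/*
 * Illustrative sketch (not part of the original source): a typical caller of
 * ice_aq_get_phy_caps() allocates the response structure and picks one of the
 * report modes, mirroring what ice_init_hw() does further down in this file:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	enum ice_status status;
 *
 *	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
 *				     pcaps, NULL);
 *	devm_kfree(ice_hw_to_dev(hw), pcaps);
 */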

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
			return ICE_MEDIA_BACKPLANE;
		}
	}

	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
static enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);

	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}

/**
 * ice_init_flex_flags
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize Rx flex flags
 */
static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	u8 idx = 0;

	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
	 * flexiflags1[3:0] - Not used for flag programming
	 * flexiflags2[7:0] - Tunnel and VLAN types
	 * 2 invalid fields in last index
	 */
	switch (prof_id) {
	/* Rx flex flags are currently programmed for the NIC profiles only.
	 * Different flag bit programming configurations can be added per
	 * profile as needed.
	 */
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_FIN, idx++);
		/* flex flag 1 is not used for flexi-flag programming, skipping
		 * these four FLG64 bits.
		 */
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
				   ICE_RXFLG_EVLAN_x9100, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
				   ICE_RXFLG_TNL0, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Flag programming for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_flex_flds
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize flex descriptors
 */
static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	enum ice_flex_rx_mdid mdid;

	switch (prof_id) {
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);

		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;

		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);

		ice_init_flex_flags(hw, prof_id);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Field init for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the hw struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	return ice_init_def_sw_recp(hw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the hw struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

#define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
#define ICE_FW_LOG_DESC_SIZE_MAX	\
	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
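
/* Note (added for clarity, not in the original source): the "(n) - 1" term
 * accounts for the one entry element already declared inside
 * struct ice_aqc_fw_logging_data, so the macro yields the buffer size needed
 * to hold n logging-module entries in total.
 */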

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the hw struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain
 * their previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging_data *data = NULL;
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kzalloc(ice_hw_to_dev(hw),
						    ICE_FW_LOG_DESC_SIZE_MAX,
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data->entry[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = ICE_FW_LOG_DESC_SIZE(chgs);
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
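
/*
 * Illustrative sketch (not part of the original source): per the comment above
 * ice_cfg_fw_log(), a caller that wants FW log messages over the Rx CQ would
 * set the enable bit and the per-module "cfg" events before calling it; the
 * module index and event bits below are placeholders:
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[module_id].cfg = desired_event_bits;
 *	status = ice_cfg_fw_log(hw, true);
 */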

/**
 * ice_output_fw_log
 * @hw: pointer to the hw struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran - determine int/intrl granularity
 * @hw: pointer to the hw struct
 *
 * Determines the itr/intrl granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to determine itr/intrl granularity\n");
		return ICE_ERR_CFG;
	}

	return 0;
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	status = ice_get_itr_intrl_gran(hw);
	if (status)
		return status;

	status = ice_init_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to hw */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	ice_dev_onetime_setup(hw);

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_shutdown_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_shutdown_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
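
/*
 * Illustrative sketch (not part of the original source): per the note above,
 * a rebuild flow that requests anything other than a PF reset must clear PXE
 * mode again once the AQ interface is back, e.g.:
 *
 *	status = ice_reset(hw, ICE_RESET_CORER);
 *	...
 *	status = ice_init_all_ctrlq(hw);
 *	...
 *	ice_clear_pxe_mode(hw);
 */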

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to hw register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to hw */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to hw register space
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
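
/*
 * Illustrative sketch (not part of the original source): a caller fills the
 * sparse struct ice_rlan_ctx and lets ice_write_rxq_ctx() pack it into the
 * dense HW layout described by ice_rlan_ctx_info above; the shift below
 * assumes the queue base is programmed in 128-byte units:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;
 *	rlan_ctx.qlen = ring_count;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */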

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
	{ 0 }
};

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
		  void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}

/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the hw struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource id
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
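
/*
 * Illustrative sketch (not part of the original source): the expected usage of
 * the helpers above is acquire, operate while the resource is owned, then
 * release; the resource ID and timeout below are placeholders:
 *
 *	status = ice_acquire_res(hw, res_id, ICE_RES_READ, timeout_ms);
 *	if (status)
 *		return status;
 *	... access the resource ...
 *	ice_release_res(hw, res_id);
 */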

/**
 * ice_get_guar_num_vsi - determine number of guar VSI for a PF
 * @hw: pointer to the hw structure
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of VSI per PF.
 */
static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return ICE_MAX_VSI / funcs;
}

/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			caps->valid_functions = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Valid Functions = %d\n",
				  caps->valid_functions);
			break;
		case ICE_AQC_CAPS_SRIOV:
			caps->sr_iov_1_1 = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
			break;
		case ICE_AQC_CAPS_VF:
			if (dev_p) {
				dev_p->num_vfs_exposed = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: VFs exposed = %d\n",
					  dev_p->num_vfs_exposed);
			} else if (func_p) {
				func_p->num_allocd_vfs = number;
				func_p->vf_base_id = logical_id;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: VFs allocated = %d\n",
					  func_p->num_allocd_vfs);
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: VF base_id = %d\n",
					  func_p->vf_base_id);
			}
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  number);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}

/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @cap_count: cap count needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
		*cap_count = le32_to_cpu(cmd->count);
	return status;
}

/**
 * ice_discover_caps - get info about the HW
 * @hw: pointer to the hardware structure
 * @opc: capabilities type to discover - pass in the command opcode
 */
static enum ice_status ice_discover_caps(struct ice_hw *hw,
					 enum ice_adminq_opc opc)
{
	enum ice_status status;
	u32 cap_count;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
	 * The driver then allocates the buffer based on the count and retries
	 * the operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cap_count = ICE_GET_CAP_BUF_COUNT;
	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf_len = (u16)(cap_count *
				 sizeof(struct ice_aqc_list_caps_elem));
		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
					      opc, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with bigger buffer */
	} while (--retries);

	return status;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
	if (!status)
		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);

	return status;
}

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the hw struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;

	/* Prep values for flags, sah, sal */
	cmd->sah = htons(*((u16 *)mac_addr));
	cmd->sal = htonl(*((u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
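
/* Note (added for clarity, not in the original source): the conversions above
 * pack the first two octets of the MAC address into "sah" and the remaining
 * four octets into "sal", in big-endian byte order as produced by htons/htonl.
 */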

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 *
 * This helper function will convert a phy_type_low to its corresponding link
 * speed.
 * Note: In the structure of phy_type_low, there should be one bit set, as
 * this function will convert one phy type to its speed.
 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
 */
static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
{
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	return speed_phy_type_low;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap structure, you can check it at
 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each bit of phy_type_low represents a certain link speed. This helper
 * function will turn on bits in phy_type_low based on the value of the
 * link_speeds_bitmap input parameter.
1791 */ 1792 void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap) 1793 { 1794 u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN; 1795 u64 pt_low; 1796 int index; 1797 1798 /* We first check with low part of phy_type */ 1799 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 1800 pt_low = BIT_ULL(index); 1801 speed = ice_get_link_speed_based_on_phy_type(pt_low); 1802 1803 if (link_speeds_bitmap & speed) 1804 *phy_type_low |= BIT_ULL(index); 1805 } 1806 } 1807 1808 /** 1809 * ice_aq_set_phy_cfg 1810 * @hw: pointer to the hw struct 1811 * @lport: logical port number 1812 * @cfg: structure with PHY configuration data to be set 1813 * @cd: pointer to command details structure or NULL 1814 * 1815 * Set the various PHY configuration parameters supported on the Port. 1816 * One or more of the Set PHY config parameters may be ignored in an MFP 1817 * mode as the PF may not have the privilege to set some of the PHY Config 1818 * parameters. This status will be indicated by the command response (0x0601). 1819 */ 1820 enum ice_status 1821 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, 1822 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 1823 { 1824 struct ice_aq_desc desc; 1825 1826 if (!cfg) 1827 return ICE_ERR_PARAM; 1828 1829 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 1830 desc.params.set_phy.lport_num = lport; 1831 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1832 1833 return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 1834 } 1835 1836 /** 1837 * ice_update_link_info - update status of the HW network link 1838 * @pi: port info structure of the interested logical port 1839 */ 1840 enum ice_status ice_update_link_info(struct ice_port_info *pi) 1841 { 1842 struct ice_aqc_get_phy_caps_data *pcaps; 1843 struct ice_phy_info *phy_info; 1844 enum ice_status status; 1845 struct ice_hw *hw; 1846 1847 if (!pi) 1848 return ICE_ERR_PARAM; 1849 1850 hw = pi->hw; 1851 1852 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 1853 if (!pcaps) 1854 return ICE_ERR_NO_MEMORY; 1855 1856 phy_info = &pi->phy; 1857 status = ice_aq_get_link_info(pi, true, NULL, NULL); 1858 if (status) 1859 goto out; 1860 1861 if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 1862 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 1863 pcaps, NULL); 1864 if (status) 1865 goto out; 1866 1867 memcpy(phy_info->link_info.module_type, &pcaps->module_type, 1868 sizeof(phy_info->link_info.module_type)); 1869 } 1870 out: 1871 devm_kfree(ice_hw_to_dev(hw), pcaps); 1872 return status; 1873 } 1874 1875 /** 1876 * ice_set_fc 1877 * @pi: port information structure 1878 * @aq_failures: pointer to status code, specific to ice_set_fc routine 1879 * @ena_auto_link_update: enable automatic link update 1880 * 1881 * Set the requested flow control mode. 
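 *
 * A minimal usage sketch (illustrative only; assumes the caller owns a valid
 * pi and has already chosen pi->fc.req_mode):
 *
 *	u8 aq_failures;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	if (ice_set_fc(pi, &aq_failures, true))
 *		dev_dbg(ice_hw_to_dev(pi->hw), "set fc failed, code %d\n",
 *			aq_failures);
 *
 * On failure, aq_failures reports which step failed: ICE_SET_FC_AQ_FAIL_GET,
 * ICE_SET_FC_AQ_FAIL_SET or ICE_SET_FC_AQ_FAIL_UPDATE.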
1882 */ 1883 enum ice_status 1884 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 1885 { 1886 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 1887 struct ice_aqc_get_phy_caps_data *pcaps; 1888 enum ice_status status; 1889 u8 pause_mask = 0x0; 1890 struct ice_hw *hw; 1891 1892 if (!pi) 1893 return ICE_ERR_PARAM; 1894 hw = pi->hw; 1895 *aq_failures = ICE_SET_FC_AQ_FAIL_NONE; 1896 1897 switch (pi->fc.req_mode) { 1898 case ICE_FC_FULL: 1899 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 1900 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 1901 break; 1902 case ICE_FC_RX_PAUSE: 1903 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 1904 break; 1905 case ICE_FC_TX_PAUSE: 1906 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 1907 break; 1908 default: 1909 break; 1910 } 1911 1912 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 1913 if (!pcaps) 1914 return ICE_ERR_NO_MEMORY; 1915 1916 /* Get the current phy config */ 1917 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, 1918 NULL); 1919 if (status) { 1920 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 1921 goto out; 1922 } 1923 1924 /* clear the old pause settings */ 1925 cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 1926 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 1927 /* set the new capabilities */ 1928 cfg.caps |= pause_mask; 1929 /* If the capabilities have changed, then set the new config */ 1930 if (cfg.caps != pcaps->caps) { 1931 int retry_count, retry_max = 10; 1932 1933 /* Auto restart link so settings take effect */ 1934 if (ena_auto_link_update) 1935 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1936 /* Copy over all the old settings */ 1937 cfg.phy_type_low = pcaps->phy_type_low; 1938 cfg.low_power_ctrl = pcaps->low_power_ctrl; 1939 cfg.eee_cap = pcaps->eee_cap; 1940 cfg.eeer_value = pcaps->eeer_value; 1941 cfg.link_fec_opt = pcaps->link_fec_options; 1942 1943 status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL); 1944 if (status) { 1945 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 1946 goto out; 1947 } 1948 1949 /* Update the link info 1950 * It sometimes takes a really long time for link to 1951 * come back from the atomic reset. Thus, we wait a 1952 * little bit. 1953 */ 1954 for (retry_count = 0; retry_count < retry_max; retry_count++) { 1955 status = ice_update_link_info(pi); 1956 1957 if (!status) 1958 break; 1959 1960 mdelay(100); 1961 } 1962 1963 if (status) 1964 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 1965 } 1966 1967 out: 1968 devm_kfree(ice_hw_to_dev(hw), pcaps); 1969 return status; 1970 } 1971 1972 /** 1973 * ice_get_link_status - get status of the HW network link 1974 * @pi: port information structure 1975 * @link_up: pointer to bool (true/false = linkup/linkdown) 1976 * 1977 * Variable link_up is true if link is up, false if link is down. 1978 * The variable link_up is invalid if status is non zero. 
As a 1979 * result of this call, link status reporting becomes enabled 1980 */ 1981 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) 1982 { 1983 struct ice_phy_info *phy_info; 1984 enum ice_status status = 0; 1985 1986 if (!pi || !link_up) 1987 return ICE_ERR_PARAM; 1988 1989 phy_info = &pi->phy; 1990 1991 if (phy_info->get_link_info) { 1992 status = ice_update_link_info(pi); 1993 1994 if (status) 1995 ice_debug(pi->hw, ICE_DBG_LINK, 1996 "get link status error, status = %d\n", 1997 status); 1998 } 1999 2000 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 2001 2002 return status; 2003 } 2004 2005 /** 2006 * ice_aq_set_link_restart_an 2007 * @pi: pointer to the port information structure 2008 * @ena_link: if true: enable link, if false: disable link 2009 * @cd: pointer to command details structure or NULL 2010 * 2011 * Sets up the link and restarts the Auto-Negotiation over the link. 2012 */ 2013 enum ice_status 2014 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 2015 struct ice_sq_cd *cd) 2016 { 2017 struct ice_aqc_restart_an *cmd; 2018 struct ice_aq_desc desc; 2019 2020 cmd = &desc.params.restart_an; 2021 2022 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 2023 2024 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 2025 cmd->lport_num = pi->lport; 2026 if (ena_link) 2027 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 2028 else 2029 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 2030 2031 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 2032 } 2033 2034 /** 2035 * __ice_aq_get_set_rss_lut 2036 * @hw: pointer to the hardware structure 2037 * @vsi_id: VSI FW index 2038 * @lut_type: LUT table type 2039 * @lut: pointer to the LUT buffer provided by the caller 2040 * @lut_size: size of the LUT buffer 2041 * @glob_lut_idx: global LUT index 2042 * @set: set true to set the table, false to get the table 2043 * 2044 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 2045 */ 2046 static enum ice_status 2047 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, 2048 u16 lut_size, u8 glob_lut_idx, bool set) 2049 { 2050 struct ice_aqc_get_set_rss_lut *cmd_resp; 2051 struct ice_aq_desc desc; 2052 enum ice_status status; 2053 u16 flags = 0; 2054 2055 cmd_resp = &desc.params.get_set_rss_lut; 2056 2057 if (set) { 2058 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); 2059 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2060 } else { 2061 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); 2062 } 2063 2064 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 2065 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & 2066 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | 2067 ICE_AQC_GSET_RSS_LUT_VSI_VALID); 2068 2069 switch (lut_type) { 2070 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: 2071 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: 2072 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: 2073 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & 2074 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); 2075 break; 2076 default: 2077 status = ICE_ERR_PARAM; 2078 goto ice_aq_get_set_rss_lut_exit; 2079 } 2080 2081 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { 2082 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & 2083 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); 2084 2085 if (!set) 2086 goto ice_aq_get_set_rss_lut_send; 2087 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 2088 if (!set) 2089 goto ice_aq_get_set_rss_lut_send; 2090 } else { 2091 goto ice_aq_get_set_rss_lut_send; 2092 } 2093 2094 /* LUT size 
is only valid for Global and PF table types */ 2095 switch (lut_size) { 2096 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: 2097 break; 2098 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: 2099 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << 2100 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 2101 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 2102 break; 2103 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: 2104 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 2105 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << 2106 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 2107 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 2108 break; 2109 } 2110 /* fall-through */ 2111 default: 2112 status = ICE_ERR_PARAM; 2113 goto ice_aq_get_set_rss_lut_exit; 2114 } 2115 2116 ice_aq_get_set_rss_lut_send: 2117 cmd_resp->flags = cpu_to_le16(flags); 2118 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 2119 2120 ice_aq_get_set_rss_lut_exit: 2121 return status; 2122 } 2123 2124 /** 2125 * ice_aq_get_rss_lut 2126 * @hw: pointer to the hardware structure 2127 * @vsi_handle: software VSI handle 2128 * @lut_type: LUT table type 2129 * @lut: pointer to the LUT buffer provided by the caller 2130 * @lut_size: size of the LUT buffer 2131 * 2132 * get the RSS lookup table, PF or VSI type 2133 */ 2134 enum ice_status 2135 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, 2136 u8 *lut, u16 lut_size) 2137 { 2138 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 2139 return ICE_ERR_PARAM; 2140 2141 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2142 lut_type, lut, lut_size, 0, false); 2143 } 2144 2145 /** 2146 * ice_aq_set_rss_lut 2147 * @hw: pointer to the hardware structure 2148 * @vsi_handle: software VSI handle 2149 * @lut_type: LUT table type 2150 * @lut: pointer to the LUT buffer provided by the caller 2151 * @lut_size: size of the LUT buffer 2152 * 2153 * set the RSS lookup table, PF or VSI type 2154 */ 2155 enum ice_status 2156 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, 2157 u8 *lut, u16 lut_size) 2158 { 2159 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 2160 return ICE_ERR_PARAM; 2161 2162 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2163 lut_type, lut, lut_size, 0, true); 2164 } 2165 2166 /** 2167 * __ice_aq_get_set_rss_key 2168 * @hw: pointer to the hw struct 2169 * @vsi_id: VSI FW index 2170 * @key: pointer to key info struct 2171 * @set: set true to set the key, false to get the key 2172 * 2173 * get (0x0B04) or set (0x0B02) the RSS key per VSI 2174 */ 2175 static enum 2176 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 2177 struct ice_aqc_get_set_rss_keys *key, 2178 bool set) 2179 { 2180 struct ice_aqc_get_set_rss_key *cmd_resp; 2181 u16 key_size = sizeof(*key); 2182 struct ice_aq_desc desc; 2183 2184 cmd_resp = &desc.params.get_set_rss_key; 2185 2186 if (set) { 2187 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 2188 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2189 } else { 2190 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 2191 } 2192 2193 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 2194 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & 2195 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | 2196 ICE_AQC_GSET_RSS_KEY_VSI_VALID); 2197 2198 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 2199 } 2200 2201 /** 2202 * ice_aq_get_rss_key 2203 * @hw: pointer to the hw struct 2204 * @vsi_handle: software VSI handle 2205 * @key: pointer to key info struct 2206 * 2207 * get the RSS key per VSI 2208 */ 2209 enum ice_status 2210 
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 2211 struct ice_aqc_get_set_rss_keys *key) 2212 { 2213 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 2214 return ICE_ERR_PARAM; 2215 2216 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2217 key, false); 2218 } 2219 2220 /** 2221 * ice_aq_set_rss_key 2222 * @hw: pointer to the hw struct 2223 * @vsi_handle: software VSI handle 2224 * @keys: pointer to key info struct 2225 * 2226 * set the RSS key per VSI 2227 */ 2228 enum ice_status 2229 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 2230 struct ice_aqc_get_set_rss_keys *keys) 2231 { 2232 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 2233 return ICE_ERR_PARAM; 2234 2235 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 2236 keys, true); 2237 } 2238 2239 /** 2240 * ice_aq_add_lan_txq 2241 * @hw: pointer to the hardware structure 2242 * @num_qgrps: Number of added queue groups 2243 * @qg_list: list of queue groups to be added 2244 * @buf_size: size of buffer for indirect command 2245 * @cd: pointer to command details structure or NULL 2246 * 2247 * Add Tx LAN queue (0x0C30) 2248 * 2249 * NOTE: 2250 * Prior to calling add Tx LAN queue: 2251 * Initialize the following as part of the Tx queue context: 2252 * Completion queue ID if the queue uses Completion queue, Quanta profile, 2253 * Cache profile and Packet shaper profile. 2254 * 2255 * After add Tx LAN queue AQ command is completed: 2256 * Interrupts should be associated with specific queues, 2257 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 2258 * flow. 2259 */ 2260 static enum ice_status 2261 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 2262 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 2263 struct ice_sq_cd *cd) 2264 { 2265 u16 i, sum_header_size, sum_q_size = 0; 2266 struct ice_aqc_add_tx_qgrp *list; 2267 struct ice_aqc_add_txqs *cmd; 2268 struct ice_aq_desc desc; 2269 2270 cmd = &desc.params.add_txqs; 2271 2272 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 2273 2274 if (!qg_list) 2275 return ICE_ERR_PARAM; 2276 2277 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 2278 return ICE_ERR_PARAM; 2279 2280 sum_header_size = num_qgrps * 2281 (sizeof(*qg_list) - sizeof(*qg_list->txqs)); 2282 2283 list = qg_list; 2284 for (i = 0; i < num_qgrps; i++) { 2285 struct ice_aqc_add_txqs_perq *q = list->txqs; 2286 2287 sum_q_size += list->num_txqs * sizeof(*q); 2288 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs); 2289 } 2290 2291 if (buf_size != (sum_header_size + sum_q_size)) 2292 return ICE_ERR_PARAM; 2293 2294 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2295 2296 cmd->num_qgrps = num_qgrps; 2297 2298 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 2299 } 2300 2301 /** 2302 * ice_aq_dis_lan_txq 2303 * @hw: pointer to the hardware structure 2304 * @num_qgrps: number of groups in the list 2305 * @qg_list: the list of groups to disable 2306 * @buf_size: the total size of the qg_list buffer in bytes 2307 * @rst_src: if called due to reset, specifies the RST source 2308 * @vmvf_num: the relative VM or VF number that is undergoing the reset 2309 * @cd: pointer to command details structure or NULL 2310 * 2311 * Disable LAN Tx queue (0x0C31) 2312 */ 2313 static enum ice_status 2314 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 2315 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 2316 enum ice_disq_rst_src rst_src, u16 vmvf_num, 2317 struct ice_sq_cd *cd) 2318 { 2319 struct ice_aqc_dis_txqs *cmd; 2320 struct ice_aq_desc desc; 
2321 u16 i, sz = 0; 2322 2323 cmd = &desc.params.dis_txqs; 2324 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 2325 2326 /* qg_list can be NULL only in VM/VF reset flow */ 2327 if (!qg_list && !rst_src) 2328 return ICE_ERR_PARAM; 2329 2330 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 2331 return ICE_ERR_PARAM; 2332 2333 cmd->num_entries = num_qgrps; 2334 2335 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & 2336 ICE_AQC_Q_DIS_TIMEOUT_M); 2337 2338 switch (rst_src) { 2339 case ICE_VM_RESET: 2340 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 2341 cmd->vmvf_and_timeout |= 2342 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); 2343 break; 2344 case ICE_VF_RESET: 2345 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 2346 /* In this case, FW expects vmvf_num to be absolute VF id */ 2347 cmd->vmvf_and_timeout |= 2348 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & 2349 ICE_AQC_Q_DIS_VMVF_NUM_M); 2350 break; 2351 case ICE_NO_RESET: 2352 default: 2353 break; 2354 } 2355 2356 /* If no queue group info, we are in a reset flow. Issue the AQ */ 2357 if (!qg_list) 2358 goto do_aq; 2359 2360 /* set RD bit to indicate that command buffer is provided by the driver 2361 * and it needs to be read by the firmware 2362 */ 2363 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2364 2365 for (i = 0; i < num_qgrps; ++i) { 2366 /* Calculate the size taken up by the queue IDs in this group */ 2367 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id); 2368 2369 /* Add the size of the group header */ 2370 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id); 2371 2372 /* If the num of queues is even, add 2 bytes of padding */ 2373 if ((qg_list[i].num_qs % 2) == 0) 2374 sz += 2; 2375 } 2376 2377 if (buf_size != sz) 2378 return ICE_ERR_PARAM; 2379 2380 do_aq: 2381 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 2382 } 2383 2384 /* End of FW Admin Queue command wrappers */ 2385 2386 /** 2387 * ice_write_byte - write a byte to a packed context structure 2388 * @src_ctx: the context structure to read from 2389 * @dest_ctx: the context to be written to 2390 * @ce_info: a description of the struct to be filled 2391 */ 2392 static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx, 2393 const struct ice_ctx_ele *ce_info) 2394 { 2395 u8 src_byte, dest_byte, mask; 2396 u8 *from, *dest; 2397 u16 shift_width; 2398 2399 /* copy from the next struct field */ 2400 from = src_ctx + ce_info->offset; 2401 2402 /* prepare the bits and mask */ 2403 shift_width = ce_info->lsb % 8; 2404 mask = (u8)(BIT(ce_info->width) - 1); 2405 2406 src_byte = *from; 2407 src_byte &= mask; 2408 2409 /* shift to correct alignment */ 2410 mask <<= shift_width; 2411 src_byte <<= shift_width; 2412 2413 /* get the current bits from the target bit string */ 2414 dest = dest_ctx + (ce_info->lsb / 8); 2415 2416 memcpy(&dest_byte, dest, sizeof(dest_byte)); 2417 2418 dest_byte &= ~mask; /* get the bits not changing */ 2419 dest_byte |= src_byte; /* add in the new bits */ 2420 2421 /* put it all back */ 2422 memcpy(dest, &dest_byte, sizeof(dest_byte)); 2423 } 2424 2425 /** 2426 * ice_write_word - write a word to a packed context structure 2427 * @src_ctx: the context structure to read from 2428 * @dest_ctx: the context to be written to 2429 * @ce_info: a description of the struct to be filled 2430 */ 2431 static void ice_write_word(u8 *src_ctx, u8 *dest_ctx, 2432 const struct ice_ctx_ele *ce_info) 2433 { 2434 u16 src_word, mask; 2435 __le16 dest_word; 2436 u8 *from, *dest; 2437 u16 shift_width; 2438 2439 /* copy from the next struct field */ 2440 
from = src_ctx + ce_info->offset; 2441 2442 /* prepare the bits and mask */ 2443 shift_width = ce_info->lsb % 8; 2444 mask = BIT(ce_info->width) - 1; 2445 2446 /* don't swizzle the bits until after the mask because the mask bits 2447 * will be in a different bit position on big endian machines 2448 */ 2449 src_word = *(u16 *)from; 2450 src_word &= mask; 2451 2452 /* shift to correct alignment */ 2453 mask <<= shift_width; 2454 src_word <<= shift_width; 2455 2456 /* get the current bits from the target bit string */ 2457 dest = dest_ctx + (ce_info->lsb / 8); 2458 2459 memcpy(&dest_word, dest, sizeof(dest_word)); 2460 2461 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 2462 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 2463 2464 /* put it all back */ 2465 memcpy(dest, &dest_word, sizeof(dest_word)); 2466 } 2467 2468 /** 2469 * ice_write_dword - write a dword to a packed context structure 2470 * @src_ctx: the context structure to read from 2471 * @dest_ctx: the context to be written to 2472 * @ce_info: a description of the struct to be filled 2473 */ 2474 static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx, 2475 const struct ice_ctx_ele *ce_info) 2476 { 2477 u32 src_dword, mask; 2478 __le32 dest_dword; 2479 u8 *from, *dest; 2480 u16 shift_width; 2481 2482 /* copy from the next struct field */ 2483 from = src_ctx + ce_info->offset; 2484 2485 /* prepare the bits and mask */ 2486 shift_width = ce_info->lsb % 8; 2487 2488 /* if the field width is exactly 32 on an x86 machine, then the shift 2489 * operation will not work because the SHL instructions count is masked 2490 * to 5 bits so the shift will do nothing 2491 */ 2492 if (ce_info->width < 32) 2493 mask = BIT(ce_info->width) - 1; 2494 else 2495 mask = (u32)~0; 2496 2497 /* don't swizzle the bits until after the mask because the mask bits 2498 * will be in a different bit position on big endian machines 2499 */ 2500 src_dword = *(u32 *)from; 2501 src_dword &= mask; 2502 2503 /* shift to correct alignment */ 2504 mask <<= shift_width; 2505 src_dword <<= shift_width; 2506 2507 /* get the current bits from the target bit string */ 2508 dest = dest_ctx + (ce_info->lsb / 8); 2509 2510 memcpy(&dest_dword, dest, sizeof(dest_dword)); 2511 2512 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 2513 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 2514 2515 /* put it all back */ 2516 memcpy(dest, &dest_dword, sizeof(dest_dword)); 2517 } 2518 2519 /** 2520 * ice_write_qword - write a qword to a packed context structure 2521 * @src_ctx: the context structure to read from 2522 * @dest_ctx: the context to be written to 2523 * @ce_info: a description of the struct to be filled 2524 */ 2525 static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx, 2526 const struct ice_ctx_ele *ce_info) 2527 { 2528 u64 src_qword, mask; 2529 __le64 dest_qword; 2530 u8 *from, *dest; 2531 u16 shift_width; 2532 2533 /* copy from the next struct field */ 2534 from = src_ctx + ce_info->offset; 2535 2536 /* prepare the bits and mask */ 2537 shift_width = ce_info->lsb % 8; 2538 2539 /* if the field width is exactly 64 on an x86 machine, then the shift 2540 * operation will not work because the SHL instructions count is masked 2541 * to 6 bits so the shift will do nothing 2542 */ 2543 if (ce_info->width < 64) 2544 mask = BIT_ULL(ce_info->width) - 1; 2545 else 2546 mask = (u64)~0; 2547 2548 /* don't swizzle the bits until after the mask because the mask bits 2549 * will be in a different bit position on big endian 
machines 2550 */ 2551 src_qword = *(u64 *)from; 2552 src_qword &= mask; 2553 2554 /* shift to correct alignment */ 2555 mask <<= shift_width; 2556 src_qword <<= shift_width; 2557 2558 /* get the current bits from the target bit string */ 2559 dest = dest_ctx + (ce_info->lsb / 8); 2560 2561 memcpy(&dest_qword, dest, sizeof(dest_qword)); 2562 2563 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 2564 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 2565 2566 /* put it all back */ 2567 memcpy(dest, &dest_qword, sizeof(dest_qword)); 2568 } 2569
2570 /** 2571 * ice_set_ctx - set context bits in packed structure 2572 * @src_ctx: pointer to a generic non-packed context structure 2573 * @dest_ctx: pointer to memory for the packed structure 2574 * @ce_info: a description of the structure to be transformed 2575 */ 2576 enum ice_status 2577 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 2578 { 2579 int f; 2580 2581 for (f = 0; ce_info[f].width; f++) { 2582 /* We have to deal with each element of the FW response 2583 * using the correct size so that we are correct regardless 2584 * of the endianness of the machine. 2585 */ 2586 switch (ce_info[f].size_of) { 2587 case sizeof(u8): 2588 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 2589 break; 2590 case sizeof(u16): 2591 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 2592 break; 2593 case sizeof(u32): 2594 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 2595 break; 2596 case sizeof(u64): 2597 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 2598 break; 2599 default: 2600 return ICE_ERR_INVAL_SIZE; 2601 } 2602 } 2603 2604 return 0; 2605 } 2606
2607 /** 2608 * ice_ena_vsi_txq 2609 * @pi: port information structure 2610 * @vsi_handle: software VSI handle 2611 * @tc: TC number 2612 * @num_qgrps: Number of added queue groups 2613 * @buf: list of queue groups to be added 2614 * @buf_size: size of buffer for indirect command 2615 * @cd: pointer to command details structure or NULL 2616 * 2617 * This function adds one LAN queue. 2618 */ 2619 enum ice_status 2620 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, 2621 struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 2622 struct ice_sq_cd *cd) 2623 { 2624 struct ice_aqc_txsched_elem_data node = { 0 }; 2625 struct ice_sched_node *parent; 2626 enum ice_status status; 2627 struct ice_hw *hw; 2628 2629 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 2630 return ICE_ERR_CFG; 2631 2632 if (num_qgrps > 1 || buf->num_txqs > 1) 2633 return ICE_ERR_MAX_LIMIT; 2634 2635 hw = pi->hw; 2636 2637 if (!ice_is_vsi_valid(hw, vsi_handle)) 2638 return ICE_ERR_PARAM; 2639 2640 mutex_lock(&pi->sched_lock); 2641
2642 /* find a parent node */ 2643 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 2644 ICE_SCHED_NODE_OWNER_LAN); 2645 if (!parent) { 2646 status = ICE_ERR_PARAM; 2647 goto ena_txq_exit; 2648 } 2649 2650 buf->parent_teid = parent->info.node_teid; 2651 node.parent_teid = parent->info.node_teid; 2652 /* Mark the values in the "generic" section as valid. The default 2653 * value in the "generic" section is zero. This means that: 2654 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 2655 * - 0 priority among siblings, indicated by Bit 1-3. 2656 * - WFQ, indicated by Bit 4. 2657 * - 0 Adjustment value is used in PSM credit update flow, indicated by 2658 * Bit 5-6. 2659 * - Bit 7 is reserved.
2660 * Without setting the generic section as valid in valid_sections, the 2661 * Admin Q command will fail with error code ICE_AQ_RC_EINVAL. 2662 */ 2663 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC; 2664 2665 /* add the LAN queue */ 2666 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 2667 if (status) 2668 goto ena_txq_exit; 2669 2670 node.node_teid = buf->txqs[0].q_teid; 2671 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 2672 2673 /* add a leaf node into the scheduler tree queue layer */ 2674 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); 2675 2676 ena_txq_exit: 2677 mutex_unlock(&pi->sched_lock); 2678 return status; 2679 } 2680
2681 /** 2682 * ice_dis_vsi_txq 2683 * @pi: port information structure 2684 * @num_queues: number of queues 2685 * @q_ids: pointer to the q_id array 2686 * @q_teids: pointer to queue node teids 2687 * @rst_src: if called due to reset, specifies the RST source 2688 * @vmvf_num: the relative VM or VF number that is undergoing the reset 2689 * @cd: pointer to command details structure or NULL 2690 * 2691 * This function removes queues and their corresponding nodes in SW DB 2692 */ 2693 enum ice_status 2694 ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, 2695 u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, 2696 struct ice_sq_cd *cd) 2697 { 2698 enum ice_status status = ICE_ERR_DOES_NOT_EXIST; 2699 struct ice_aqc_dis_txq_item qg_list; 2700 u16 i; 2701 2702 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 2703 return ICE_ERR_CFG; 2704
2705 /* if the queue is already disabled but the disable queue command still 2706 * has to be sent to complete the VF reset, then call ice_aq_dis_lan_txq 2707 * without any queue information 2708 */ 2709 2710 if (!num_queues && rst_src) 2711 return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num, 2712 NULL); 2713 2714 mutex_lock(&pi->sched_lock); 2715 2716 for (i = 0; i < num_queues; i++) { 2717 struct ice_sched_node *node; 2718 2719 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 2720 if (!node) 2721 continue; 2722 qg_list.parent_teid = node->info.parent_teid; 2723 qg_list.num_qs = 1; 2724 qg_list.q_id[0] = cpu_to_le16(q_ids[i]); 2725 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, 2726 sizeof(qg_list), rst_src, vmvf_num, 2727 cd); 2728 2729 if (status) 2730 break; 2731 ice_free_sched_node(pi, node); 2732 } 2733 mutex_unlock(&pi->sched_lock); 2734 return status; 2735 } 2736
2737 /** 2738 * ice_cfg_vsi_qs - configure the new/existing VSI queues 2739 * @pi: port information structure 2740 * @vsi_handle: software VSI handle 2741 * @tc_bitmap: TC bitmap 2742 * @maxqs: max queues array per TC 2743 * @owner: LAN or RDMA 2744 * 2745 * This function adds/updates the VSI queues per TC.
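 * Only TCs that have a scheduler node are touched: for each such TC,
 * ice_sched_cfg_vsi() is called with maxqs[tc] and the TC's enable state
 * taken from tc_bitmap.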
2746 */ 2747 static enum ice_status 2748 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 2749 u16 *maxqs, u8 owner) 2750 { 2751 enum ice_status status = 0; 2752 u8 i; 2753 2754 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 2755 return ICE_ERR_CFG; 2756 2757 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 2758 return ICE_ERR_PARAM; 2759 2760 mutex_lock(&pi->sched_lock); 2761 2762 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { 2763 /* configuration is possible only if TC node is present */ 2764 if (!ice_sched_get_tc_node(pi, i)) 2765 continue; 2766 2767 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 2768 ice_is_tc_ena(tc_bitmap, i)); 2769 if (status) 2770 break; 2771 } 2772 2773 mutex_unlock(&pi->sched_lock); 2774 return status; 2775 } 2776 2777 /** 2778 * ice_cfg_vsi_lan - configure VSI lan queues 2779 * @pi: port information structure 2780 * @vsi_handle: software VSI handle 2781 * @tc_bitmap: TC bitmap 2782 * @max_lanqs: max lan queues array per TC 2783 * 2784 * This function adds/updates the VSI lan queues per TC. 2785 */ 2786 enum ice_status 2787 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 2788 u16 *max_lanqs) 2789 { 2790 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 2791 ICE_SCHED_NODE_OWNER_LAN); 2792 } 2793 2794 /** 2795 * ice_replay_pre_init - replay pre initialization 2796 * @hw: pointer to the hw struct 2797 * 2798 * Initializes required config data for VSI, FD, ACL, and RSS before replay. 2799 */ 2800 static enum ice_status ice_replay_pre_init(struct ice_hw *hw) 2801 { 2802 struct ice_switch_info *sw = hw->switch_info; 2803 u8 i; 2804 2805 /* Delete old entries from replay filter list head if there is any */ 2806 ice_rm_all_sw_replay_rule_info(hw); 2807 /* In start of replay, move entries into replay_rules list, it 2808 * will allow adding rules entries back to filt_rules list, 2809 * which is operational list. 2810 */ 2811 for (i = 0; i < ICE_SW_LKUP_LAST; i++) 2812 list_replace_init(&sw->recp_list[i].filt_rules, 2813 &sw->recp_list[i].filt_replay_rules); 2814 2815 return 0; 2816 } 2817 2818 /** 2819 * ice_replay_vsi - replay VSI configuration 2820 * @hw: pointer to the hw struct 2821 * @vsi_handle: driver VSI handle 2822 * 2823 * Restore all VSI configuration after reset. It is required to call this 2824 * function with main VSI first. 2825 */ 2826 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) 2827 { 2828 enum ice_status status; 2829 2830 if (!ice_is_vsi_valid(hw, vsi_handle)) 2831 return ICE_ERR_PARAM; 2832 2833 /* Replay pre-initialization if there is any */ 2834 if (vsi_handle == ICE_MAIN_VSI_HANDLE) { 2835 status = ice_replay_pre_init(hw); 2836 if (status) 2837 return status; 2838 } 2839 2840 /* Replay per VSI all filters */ 2841 status = ice_replay_vsi_all_fltr(hw, vsi_handle); 2842 return status; 2843 } 2844 2845 /** 2846 * ice_replay_post - post replay configuration cleanup 2847 * @hw: pointer to the hw struct 2848 * 2849 * Post replay cleanup. 
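 * Deletes the rule entries that remain on the replay filter lists once the
 * replay of all VSIs is done.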
2850 */ 2851 void ice_replay_post(struct ice_hw *hw) 2852 { 2853 /* Delete old entries from replay filter list head */ 2854 ice_rm_all_sw_replay_rule_info(hw); 2855 } 2856 2857 /** 2858 * ice_stat_update40 - read 40 bit stat from the chip and update stat values 2859 * @hw: ptr to the hardware info 2860 * @hireg: high 32 bit HW register to read from 2861 * @loreg: low 32 bit HW register to read from 2862 * @prev_stat_loaded: bool to specify if previous stats are loaded 2863 * @prev_stat: ptr to previous loaded stat value 2864 * @cur_stat: ptr to current stat value 2865 */ 2866 void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, 2867 bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) 2868 { 2869 u64 new_data; 2870 2871 new_data = rd32(hw, loreg); 2872 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; 2873 2874 /* device stats are not reset at PFR, they likely will not be zeroed 2875 * when the driver starts. So save the first values read and use them as 2876 * offsets to be subtracted from the raw values in order to report stats 2877 * that count from zero. 2878 */ 2879 if (!prev_stat_loaded) 2880 *prev_stat = new_data; 2881 if (new_data >= *prev_stat) 2882 *cur_stat = new_data - *prev_stat; 2883 else 2884 /* to manage the potential roll-over */ 2885 *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; 2886 *cur_stat &= 0xFFFFFFFFFFULL; 2887 } 2888 2889 /** 2890 * ice_stat_update32 - read 32 bit stat from the chip and update stat values 2891 * @hw: ptr to the hardware info 2892 * @reg: HW register to read from 2893 * @prev_stat_loaded: bool to specify if previous stats are loaded 2894 * @prev_stat: ptr to previous loaded stat value 2895 * @cur_stat: ptr to current stat value 2896 */ 2897 void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 2898 u64 *prev_stat, u64 *cur_stat) 2899 { 2900 u32 new_data; 2901 2902 new_data = rd32(hw, reg); 2903 2904 /* device stats are not reset at PFR, they likely will not be zeroed 2905 * when the driver starts. So save the first values read and use them as 2906 * offsets to be subtracted from the raw values in order to report stats 2907 * that count from zero. 2908 */ 2909 if (!prev_stat_loaded) 2910 *prev_stat = new_data; 2911 if (new_data >= *prev_stat) 2912 *cur_stat = new_data - *prev_stat; 2913 else 2914 /* to manage the potential roll-over */ 2915 *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; 2916 } 2917
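
/* A minimal usage sketch for the statistic helpers above (illustrative only;
 * the GLPRT_* port statistics registers shown here and the prev/cur counters
 * are caller-owned state, not part of this file):
 *
 *	ice_stat_update40(hw, GLPRT_GORCH(hw->pf_id), GLPRT_GORCL(hw->pf_id),
 *			  prev_loaded, &prev_rx_bytes, &cur_rx_bytes);
 */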