// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#define ICE_PF_RESET_WAIT_COUNT	200

#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
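
/* Illustrative expansion (editor's sketch, not part of the driver): with
 * idx 0 and mdid ICE_RX_MDID_HASH_LOW, ICE_NIC_FLX_ENTRY() pastes the index
 * into the register and field names and expands to roughly:
 *
 *	wr32(hw, GLFLXP_RXDID_FLX_WRD_0(ICE_RXDID_FLEX_NIC),
 *	     ((ICE_RX_OPC_MDID << GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M) |
 *	     ((ICE_RX_MDID_HASH_LOW << GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M));
 *
 * i.e. a single register write that programs both the extraction opcode and
 * the metadata ID for flex word 0 of the NIC flex descriptor profile.
 */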

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID stored in the hw structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;
	return 0;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer. Interpret the user-specified
 * buffer as a "manage_mac_read" response.
 * MAC addresses from the response are stored in the HW struct (port.mac).
 * ice_aq_discover_caps is expected to be called before this function is
 * called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr);
	ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr);
	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
static enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);

	return status;
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
			return ICE_MEDIA_BACKPLANE;
		}
	}

	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);

	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}
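
/* Usage sketch (editor's illustration, not called anywhere in the driver):
 * query the current link state and enable Link Status Event reporting in
 * the same AQ call. The helper name is hypothetical.
 */
static inline enum ice_status
ice_example_query_link(struct ice_port_info *pi, bool *link_up)
{
	enum ice_status status;

	/* passing true asks firmware to start reporting link events */
	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (!status)
		*link_up = !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP);

	return status;
}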

/**
 * ice_init_flex_parser - initialize rx flex parser
 * @hw: pointer to the hardware structure
 *
 * Function to initialize flex descriptors
 */
static void ice_init_flex_parser(struct ice_hw *hw)
{
	u8 idx = 0;

	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);
	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);
	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);
	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,
			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,
			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,
			      ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,
			      idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,
			      ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the hw struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	mutex_init(&sw->mac_list_lock);
	INIT_LIST_HEAD(&sw->mac_list_head);

	mutex_init(&sw->vlan_list_lock);
	INIT_LIST_HEAD(&sw->vlan_list_head);

	mutex_init(&sw->eth_m_list_lock);
	INIT_LIST_HEAD(&sw->eth_m_list_head);

	mutex_init(&sw->promisc_list_lock);
	INIT_LIST_HEAD(&sw->promisc_list_head);

	mutex_init(&sw->mac_vlan_list_lock);
	INIT_LIST_HEAD(&sw->mac_vlan_list_head);

	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the hw struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}

	mutex_destroy(&sw->mac_list_lock);
	mutex_destroy(&sw->vlan_list_lock);
	mutex_destroy(&sw->eth_m_list_lock);
	mutex_destroy(&sw->promisc_list_lock);
	mutex_destroy(&sw->mac_vlan_list_lock);

	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	/* set these values to minimum allowed */
	hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
	hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
	hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
	hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;

	status = ice_init_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to hw */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get port MAC information */
	mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
	mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_parser(hw);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_shutdown_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_sched_cleanup_all(hw);
	ice_shutdown_all_ctrlq(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	ice_cleanup_fltr_mgmt_struct(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Copies rxq context from dense structure to hw register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to hw */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to hw register space
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
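
/* Usage sketch (editor's illustration, not part of the driver): program a
 * minimal Rx queue context. The qlen/dbuf values are placeholders; real
 * callers derive them from the ring DMA address and VSI configuration.
 */
static inline enum ice_status
ice_example_setup_rxq(struct ice_hw *hw, u32 rxq_index, u64 ring_dma)
{
	struct ice_rlan_ctx rlan_ctx = { 0 };

	rlan_ctx.base = ring_dma >> 7;	/* base is kept in 128 byte units */
	rlan_ctx.qlen = 512;		/* descriptors in the ring */
	rlan_ctx.dbuf = 2048 >> 7;	/* data buffer size in 128 byte units */
	rlan_ctx.dsize = 1;		/* assumed: 32 byte descriptors */

	/* pack the sparse struct per ice_rlan_ctx_info and write it out */
	return ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
}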

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
	{ 0 }
};

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
		  void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}

/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the hw struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests a common resource using the admin queue commands (0x0008)
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 * If the resource is held by someone else, the command completes with
	 * a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Releases a common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	enum ice_status status;
	u32 time_left = 0;
	u32 timeout;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* An admin queue return code of ICE_AQ_RC_EEXIST means that another
	 * driver has previously acquired the resource and performed any
	 * necessary updates; in this case the caller does not obtain the
	 * resource and has no further work to do.
	 */
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
		status = ICE_ERR_AQ_NO_WORK;
		goto ice_acquire_res_exit;
	}

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
			/* lock free, but no work to do */
			status = ICE_ERR_AQ_NO_WORK;
			break;
		}

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource id
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
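
/* Usage sketch (editor's illustration, helper name hypothetical): the
 * expected acquire/use/release pattern around a shared resource such as
 * the NVM. Error handling is collapsed; ICE_ERR_AQ_NO_WORK also lands in
 * the early return here.
 */
static inline void ice_example_res_pattern(struct ice_hw *hw)
{
	if (ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ))
		return;	/* busy, timed out, or nothing left to do */

	/* ... read the resource here ... */

	ice_release_res(hw, ICE_NVM_RES_ID);
}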

/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guaranteed_num_vsi = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  func_p->guaranteed_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}

/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	*data_size = le16_to_cpu(desc.datalen);

	return status;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;
	u16 data_size = 0;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
	 * The driver then allocates the buffer of this size and retries the
	 * operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cbuf_len = ICE_GET_CAP_BUF_COUNT *
		sizeof(struct ice_aqc_list_caps_elem);

	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
					      ice_aqc_opc_list_func_caps, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with a bigger buffer */
		cbuf_len = data_size;
	} while (--retries);

	return status;
}

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the hw struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;

	/* Prep values for flags, sah, sal */
	cmd->sah = htons(*((u16 *)mac_addr));
	cmd->sal = htonl(*((u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
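
/* Worked example (editor's note): for a MAC of 00:11:22:33:44:55 the
 * htons/htonl conversions above leave bytes 00:11 in sah and 22:33:44:55
 * in sal, i.e. the address is laid out in the command in the same byte
 * order it appears on the wire.
 */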

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the hw struct
 * @lport: logical port number
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
static enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_phy_cfg *cmd;
	struct ice_aq_desc desc;

	if (!cfg)
		return ICE_ERR_PARAM;

	cmd = &desc.params.set_phy;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	cmd->lport_num = lport;

	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
static enum ice_status
ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy_info;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	phy_info = &pi->phy;
	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		goto out;

	if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
					     pcaps, NULL);
		if (status)
			goto out;

		memcpy(phy_info->link_info.module_type, &pcaps->module_type,
		       sizeof(phy_info->link_info.module_type));
	}
out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @atomic_restart: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current phy config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the old pause settings */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
	/* set the new capabilities */
	cfg.caps |= pause_mask;
	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (atomic_restart)
			cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK;
		/* Copy over all the old settings */
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
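
/* Usage sketch (editor's illustration, helper name hypothetical): request
 * symmetric flow control and let firmware restart the link so the change
 * takes effect; aq_failures narrows down which AQ step failed, if any.
 */
static inline enum ice_status ice_example_enable_fc(struct ice_port_info *pi)
{
	u8 aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	pi->fc.req_mode = ICE_FC_FULL;
	return ice_set_fc(pi, &aq_failures, true);
}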

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = 0;

	if (!pi)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK,
				  "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the hw struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 * @glob_lut_idx: global LUT index
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
	} else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
	} else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
		   (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
	} else {
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
		   u16 lut_size)
{
	return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
					false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
		   u16 lut_size)
{
	return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
					true);
}
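
/* Usage sketch (editor's illustration, helper name hypothetical): program
 * a VSI LUT that round-robins flows over four queues. The vsi_id is a
 * placeholder for the VSI number handed out by firmware.
 */
static inline enum ice_status ice_example_set_lut(struct ice_hw *hw, u16 vsi_id)
{
	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128];	/* assumed: 128 entries */
	u16 i;

	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % 4;	/* spread entries over queues 0-3 */

	return ice_aq_set_rss_lut(hw, vsi_id,
				  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI,
				  lut, sizeof(lut));
}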

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum ice_status
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key,
			 bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
		   struct ice_aqc_get_set_rss_keys *key)
{
	return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: VSI FW index
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
}

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	u16 i, sum_header_size, sum_q_size = 0;
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	sum_header_size = num_qgrps *
		(sizeof(*qg_list) - sizeof(*qg_list->txqs));

	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
		struct ice_aqc_add_txqs_perq *q = list->txqs;

		sum_q_size += list->num_txqs * sizeof(*q);
		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
	}

	if (buf_size != (sum_header_size + sum_q_size))
		return ICE_ERR_PARAM;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
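
/* Buffer sizing sketch (editor's illustration, helper name hypothetical):
 * a buffer holding one queue group of num_txqs queues needs the group
 * header plus one per-queue entry, mirroring the sum_header_size and
 * sum_q_size check in ice_aq_add_lan_txq() above.
 */
static inline u16 ice_example_txq_buf_size(u16 num_txqs)
{
	return sizeof(struct ice_aqc_add_tx_qgrp) -
	       sizeof(struct ice_aqc_add_txqs_perq) +
	       num_txqs * sizeof(struct ice_aqc_add_txqs_perq);
}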

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_entries = num_qgrps;

	for (i = 0; i < num_qgrps; ++i) {
		/* Calculate the size taken up by the queue IDs in this group */
		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);

		/* Add the size of the group header */
		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}
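
/* Worked example (editor's note): for a field with width = 3 and lsb = 5,
 * shift_width is 5 and mask starts as 0x07; after both shifts mask is 0xE0
 * and the three source bits land in bits 7:5 of the destination byte, while
 * the read-modify-write in ice_write_byte() preserves bits 4:0.
 */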
/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}
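/* Worked example for the packing helpers above (illustrative only):
 * a field with width = 3 and lsb = 13 gives shift_width = 13 % 8 = 5 and
 * a destination offset of 13 / 8 = 1, so the three source bits are masked
 * with 0x7, shifted into bits 5-7, and merged into byte 1 of the packed
 * structure via read-modify-write, preserving the neighboring fields.
 */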
/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}
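/* Note on the width == 64 special case above: BIT_ULL(64) would shift a
 * 64-bit value by its full width, which is undefined behavior in C (and
 * a no-op on x86, where the SHL count is masked to 6 bits), so the
 * all-ones mask is built explicitly instead of as BIT_ULL(width) - 1.
 * The 32-bit helper handles width == 32 the same way.
 */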
/**
 * ice_set_ctx - set context bits in packed structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}
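/* A minimal usage sketch for ice_set_ctx (illustrative only; the my_ctx
 * structure, my_ctx_info element table, and MY_CTX_BUF_SIZE are
 * hypothetical stand-ins for a real context, such as the Tx LAN queue
 * context, and its ice_ctx_ele descriptor table):
 *
 *	struct my_ctx ctx = { 0 };
 *	u8 buf[MY_CTX_BUF_SIZE] = { 0 };
 *
 *	ctx.some_field = 1;
 *	if (ice_set_ctx((u8 *)&ctx, buf, my_ctx_info))
 *		return ICE_ERR_INVAL_SIZE;	// unsupported field size
 */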
/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_id: VSI id
 * @tc: TC number
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN Tx queue.
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	mutex_lock(&pi->sched_lock);

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bits 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated
	 *   by Bits 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status)
		goto ena_txq_exit;

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @num_queues: number of queues
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
		u32 *q_teids, struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}
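/* A minimal usage sketch for ice_cfg_vsi_lan (illustrative only; the TC
 * bitmap and queue count below are hypothetical, and "pi" and "vsi_id"
 * are assumed to come from an initialized port):
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *	enum ice_status status;
 *
 *	max_lanqs[0] = 16;	// request up to 16 LAN queues on TC 0
 *	status = ice_cfg_vsi_lan(pi, vsi_id, BIT(0), max_lanqs);
 *	if (status)
 *		return status;	// scheduler configuration failed
 */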