// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#define ICE_PF_RESET_WAIT_COUNT	200

#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;
	return 0;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer. Please interpret the user
 * specified buffer as a "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_aq_discover_caps is expected to be called before this
 * function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr);
	ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr);
	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
static enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);

	return status;
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
			return ICE_MEDIA_BACKPLANE;
		}
	}

	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);

	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}

/**
 * ice_init_flex_parser - initialize rx flex parser
 * @hw: pointer to the hardware structure
 *
 * Function to initialize flex descriptors
 */
static void ice_init_flex_parser(struct ice_hw *hw)
{
	u8 idx = 0;

	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);
	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);
	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);
	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,
			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,
			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,
			      ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,
			      idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,
			      ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);
	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
}

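/* Illustration only (not compiled): a rough sketch of what the first
 * ICE_NIC_FLX_ENTRY() invocation above expands to after token pasting,
 * assuming the register and field macros behave as their names suggest.
 * Shown purely to make the macro easier to follow; the real expansion is
 * whatever the register headers define.
 *
 *	wr32(hw, GLFLXP_RXDID_FLX_WRD_0(ICE_RXDID_FLEX_NIC),
 *	     ((ICE_RX_OPC_MDID << GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M) |
 *	     ((ICE_RX_MDID_HASH_LOW << GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M));
 */
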
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the hw struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	mutex_init(&sw->mac_list_lock);
	INIT_LIST_HEAD(&sw->mac_list_head);

	mutex_init(&sw->vlan_list_lock);
	INIT_LIST_HEAD(&sw->vlan_list_head);

	mutex_init(&sw->eth_m_list_lock);
	INIT_LIST_HEAD(&sw->eth_m_list_head);

	mutex_init(&sw->promisc_list_lock);
	INIT_LIST_HEAD(&sw->promisc_list_head);

	mutex_init(&sw->mac_vlan_list_lock);
	INIT_LIST_HEAD(&sw->mac_vlan_list_head);

	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the hw struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}

	mutex_destroy(&sw->mac_list_lock);
	mutex_destroy(&sw->vlan_list_lock);
	mutex_destroy(&sw->eth_m_list_lock);
	mutex_destroy(&sw->promisc_list_lock);
	mutex_destroy(&sw->mac_vlan_list_lock);

	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) & PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	/* set these values to minimum allowed */
	hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
	hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
	hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
	hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;

	status = ice_init_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to hw */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get port MAC information */
	mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
	mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_parser(hw);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_shutdown_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_sched_cleanup_all(hw);
	ice_shutdown_all_ctrlq(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	ice_cleanup_fltr_mgmt_struct(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Copies rxq context from dense structure to hw register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to hw */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to hw register space
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

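/* Usage sketch (illustrative only, not compiled): a caller that owns Rx
 * queue "q_idx" would typically zero a struct ice_rlan_ctx, fill in the
 * sparse fields named in ice_rlan_ctx_info above, and let
 * ice_write_rxq_ctx() pack and program them. The field values and the
 * 128-byte base/dbuf granularity below are assumptions for illustration,
 * not taken from this file.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;
 *	rlan_ctx.crcstrip = 1;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, q_idx);
 */
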
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
	{ 0 }
};

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
		  void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}

/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the hw struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * requests common resource using the admin queue commands (0x0008)
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 * If the resource is held by someone else, the command completes with
	 * busy return value and the timeout field indicates the maximum time
	 * the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	enum ice_status status;
	u32 time_left = 0;
	u32 timeout;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* An admin queue return code of ICE_AQ_RC_EEXIST means that another
	 * driver has previously acquired the resource and performed any
	 * necessary updates; in this case the caller does not obtain the
	 * resource and has no further work to do.
	 */
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
		status = ICE_ERR_AQ_NO_WORK;
		goto ice_acquire_res_exit;
	}

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
			/* lock free, but no work to do */
			status = ICE_ERR_AQ_NO_WORK;
			break;
		}

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource id
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

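/* Usage sketch (illustrative only, not compiled): callers are expected to
 * bracket accesses to a shared resource with ice_acquire_res() and
 * ice_release_res(), and to treat ICE_ERR_AQ_NO_WORK as "another function
 * already performed the update". ICE_NVM_RES_ID is used here only as an
 * example resource id and may not match the driver's actual enum names.
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_WRITE);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		return 0;
 *	if (status)
 *		return status;
 *	... access the resource ...
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */
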
/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guaranteed_num_vsi = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  func_p->guaranteed_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}

/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	*data_size = le16_to_cpu(desc.datalen);

	return status;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;
	u16 data_size = 0;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
	 * The driver then allocates the buffer of this size and retries the
	 * operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cbuf_len = ICE_GET_CAP_BUF_COUNT *
		sizeof(struct ice_aqc_list_caps_elem);

	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
					      ice_aqc_opc_list_func_caps, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with bigger buffer */
		cbuf_len = data_size;
	} while (--retries);

	return status;
}

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the hw struct
 * @lport: logical port number
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
static enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_phy_cfg *cmd;
	struct ice_aq_desc desc;

	if (!cfg)
		return ICE_ERR_PARAM;

	cmd = &desc.params.set_phy;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	cmd->lport_num = lport;

	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
static enum ice_status
ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy_info;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	phy_info = &pi->phy;
	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		goto out;

	if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
					     pcaps, NULL);
		if (status)
			goto out;

		memcpy(phy_info->link_info.module_type, &pcaps->module_type,
		       sizeof(phy_info->link_info.module_type));
	}
out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @atomic_restart: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current phy config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the old pause settings */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
	/* set the new capabilities */
	cfg.caps |= pause_mask;
	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (atomic_restart)
			cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK;
		/* Copy over all the old settings */
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

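/* Usage sketch (illustrative only, not compiled): ice_set_fc() reads the
 * requested mode from pi->fc.req_mode, so a caller (for example an ethtool
 * pause handler) would typically set that field first and then check the
 * aq_failures code on error. The flow below is an assumption about how a
 * caller uses this helper, not code taken from this file.
 *
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status && aq_failures == ICE_SET_FC_AQ_FAIL_SET)
 *		... the Set PHY config AQ command itself failed ...
 */
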
/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = 0;

	if (!pi)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK,
				  "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

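/* Usage sketch (illustrative only, not compiled): pi->phy.get_link_info acts
 * as a "link info is stale" flag. A caller that wants a fresh reading (for
 * example after a link event) would set the flag before calling
 * ice_get_link_status(); otherwise the cached pi->phy.link_info is used.
 * This calling pattern is an assumption for illustration.
 *
 *	bool link_up;
 *
 *	pi->phy.get_link_info = true;
 *	status = ice_get_link_status(pi, &link_up);
 *	if (!status && link_up)
 *		... link is up ...
 */
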
/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the hw struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 * @glob_lut_idx: global LUT index
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
	} else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
	} else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
		   (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
	} else {
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
		   u16 lut_size)
{
	return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
					false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
		   u16 lut_size)
{
	return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
					true);
}

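/* Usage sketch (illustrative only, not compiled): the LUT is passed as a
 * flat byte array whose length must match one of the
 * ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_* values accepted above. A 512-entry PF
 * table and the spreading pattern below are assumptions for illustration,
 * not values taken from this file.
 *
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *	int i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % num_rx_queues;
 *	status = ice_aq_set_rss_lut(hw, vsi_id,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *				    lut, sizeof(lut));
 */
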
/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum
ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
				    struct ice_aqc_get_set_rss_keys *key,
				    bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
		   struct ice_aqc_get_set_rss_keys *key)
{
	return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: VSI FW index
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
}

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	u16 i, sum_header_size, sum_q_size = 0;
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	sum_header_size = num_qgrps *
		(sizeof(*qg_list) - sizeof(*qg_list->txqs));

	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
		struct ice_aqc_add_txqs_perq *q = list->txqs;

		sum_q_size += list->num_txqs * sizeof(*q);
		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
	}

	if (buf_size != (sum_header_size + sum_q_size))
		return ICE_ERR_PARAM;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_entries = num_qgrps;

	for (i = 0; i < num_qgrps; ++i) {
		/* Calculate the size taken up by the queue IDs in this group */
		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);

		/* Add the size of the group header */
		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

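/* Worked example for the buf_size check above (illustration only): with a
 * single group that carries 3 queue IDs, the expected buffer size is the
 * group header (sizeof(struct ice_aqc_dis_txq_item) minus the q_id member)
 * plus 3 * sizeof(q_id), with no extra padding because the queue count is
 * odd; a group with 2 or 4 queues would add 2 bytes of padding. The exact
 * byte counts depend on the structure layout in ice_adminq_cmd.h and are
 * not restated here.
 */
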
/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

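/* Worked example for ice_write_byte() above and the wider variants that
 * follow (illustration only): take the qlen entry from ice_rlan_ctx_info,
 * width 13 and LSB 89. All of these helpers compute the same placement:
 * the value is shifted left by 89 % 8 = 1 bit and merged into the dense
 * buffer starting at byte 89 / 8 = 11, preserving whatever neighbouring
 * bits were already written there. Fields backed by wider struct members
 * go through the word/dword/qword variants, which additionally byte-swap
 * the masked value to little endian before merging.
 */
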
/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_entries = num_qgrps;

	for (i = 0; i < num_qgrps; ++i) {
		/* Calculate the size taken up by the queue IDs in this group */
		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);

		/* Add the size of the group header */
		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}
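
/* Worked example (illustrative): for a hypothetical context field described
 * by ce_info with lsb = 10 and width = 3, ice_write_byte() computes
 * shift_width = 10 % 8 = 2 and mask = 0x7.  The three source bits are
 * shifted left by two and merged into byte 10 / 8 = 1 of the destination
 * buffer, leaving the other five bits of that byte untouched.  The word,
 * dword and qword helpers below follow the same pattern for wider fields,
 * converting to little endian before merging.
 */
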
/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}
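
/* Usage sketch (illustrative only, not part of the driver): a caller packing
 * a hypothetical two-field context describes each field with one ice_ctx_ele
 * entry (offset into the unpacked struct, size_of the unpacked field, width
 * in bits, and lsb position in the packed image), terminates the table with
 * a zero-width entry, and hands it to ice_set_ctx().  The names my_ctx,
 * my_ctx_info and packed_buf are hypothetical.
 *
 *	struct my_ctx { u16 base; u8 ena; };	// hypothetical unpacked form
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		{ .offset = offsetof(struct my_ctx, base),
 *		  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *		{ .offset = offsetof(struct my_ctx, ena),
 *		  .size_of = sizeof(u8),  .width = 1,  .lsb = 13 },
 *		{ 0 }
 *	};
 *
 *	struct my_ctx ctx = { .base = 128, .ena = 1 };
 *
 *	ice_set_ctx((u8 *)&ctx, packed_buf, my_ctx_info);
 */
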
/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_id: VSI ID
 * @tc: TC number
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN Tx queue.
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	mutex_lock(&pi->sched_lock);

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin Queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* add the LAN Tx queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status)
		goto ena_txq_exit;

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;

	/* add a leaf node into the scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @num_queues: number of queues
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
		u32 *q_teids, struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_id: VSI ID
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_id: VSI ID
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}
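
/* Usage sketch (illustrative only, not part of the driver): a caller
 * enabling LAN queues on TC 0 only might build a per-TC maximum array and a
 * TC bitmap as follows, assuming TC n maps to bit n of tc_bitmap as
 * ice_is_tc_ena() suggests.  The variable names are hypothetical.
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *	enum ice_status status;
 *
 *	max_lanqs[0] = num_txq;		// queues requested on TC 0
 *	status = ice_cfg_vsi_lan(pi, vsi_id, BIT(0), max_lanqs);
 *	if (status)
 *		return status;
 */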