1e2cb1decSSalil Mehta // SPDX-License-Identifier: GPL-2.0+ 2e2cb1decSSalil Mehta // Copyright (c) 2016-2017 Hisilicon Limited. 3e2cb1decSSalil Mehta 4e2cb1decSSalil Mehta #include <linux/etherdevice.h> 5aa5c4f17SHuazhong Tan #include <linux/iopoll.h> 66988eb2aSSalil Mehta #include <net/rtnetlink.h> 7e2cb1decSSalil Mehta #include "hclgevf_cmd.h" 8e2cb1decSSalil Mehta #include "hclgevf_main.h" 9e2cb1decSSalil Mehta #include "hclge_mbx.h" 10e2cb1decSSalil Mehta #include "hnae3.h" 11e2cb1decSSalil Mehta 12e2cb1decSSalil Mehta #define HCLGEVF_NAME "hclgevf" 13e2cb1decSSalil Mehta 14bbe6540eSHuazhong Tan #define HCLGEVF_RESET_MAX_FAIL_CNT 5 15bbe6540eSHuazhong Tan 169c6f7085SHuazhong Tan static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 17e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf; 18e2cb1decSSalil Mehta 190ea68902SYunsheng Lin static struct workqueue_struct *hclgevf_wq; 200ea68902SYunsheng Lin 21e2cb1decSSalil Mehta static const struct pci_device_id ae_algovf_pci_tbl[] = { 22e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 23e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 24e2cb1decSSalil Mehta /* required last entry */ 25e2cb1decSSalil Mehta {0, } 26e2cb1decSSalil Mehta }; 27e2cb1decSSalil Mehta 28472d7eceSJian Shen static const u8 hclgevf_hash_key[] = { 29472d7eceSJian Shen 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 30472d7eceSJian Shen 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 31472d7eceSJian Shen 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 32472d7eceSJian Shen 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 33472d7eceSJian Shen 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 34472d7eceSJian Shen }; 35472d7eceSJian Shen 362f550a46SYunsheng Lin MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 372f550a46SYunsheng Lin 381600c3e5SJian Shen static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 391600c3e5SJian Shen HCLGEVF_CMDQ_TX_ADDR_H_REG, 401600c3e5SJian Shen 
HCLGEVF_CMDQ_TX_DEPTH_REG, 411600c3e5SJian Shen HCLGEVF_CMDQ_TX_TAIL_REG, 421600c3e5SJian Shen HCLGEVF_CMDQ_TX_HEAD_REG, 431600c3e5SJian Shen HCLGEVF_CMDQ_RX_ADDR_L_REG, 441600c3e5SJian Shen HCLGEVF_CMDQ_RX_ADDR_H_REG, 451600c3e5SJian Shen HCLGEVF_CMDQ_RX_DEPTH_REG, 461600c3e5SJian Shen HCLGEVF_CMDQ_RX_TAIL_REG, 471600c3e5SJian Shen HCLGEVF_CMDQ_RX_HEAD_REG, 481600c3e5SJian Shen HCLGEVF_VECTOR0_CMDQ_SRC_REG, 491600c3e5SJian Shen HCLGEVF_CMDQ_INTR_STS_REG, 501600c3e5SJian Shen HCLGEVF_CMDQ_INTR_EN_REG, 511600c3e5SJian Shen HCLGEVF_CMDQ_INTR_GEN_REG}; 521600c3e5SJian Shen 531600c3e5SJian Shen static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 541600c3e5SJian Shen HCLGEVF_RST_ING, 551600c3e5SJian Shen HCLGEVF_GRO_EN_REG}; 561600c3e5SJian Shen 571600c3e5SJian Shen static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 581600c3e5SJian Shen HCLGEVF_RING_RX_ADDR_H_REG, 591600c3e5SJian Shen HCLGEVF_RING_RX_BD_NUM_REG, 601600c3e5SJian Shen HCLGEVF_RING_RX_BD_LENGTH_REG, 611600c3e5SJian Shen HCLGEVF_RING_RX_MERGE_EN_REG, 621600c3e5SJian Shen HCLGEVF_RING_RX_TAIL_REG, 631600c3e5SJian Shen HCLGEVF_RING_RX_HEAD_REG, 641600c3e5SJian Shen HCLGEVF_RING_RX_FBD_NUM_REG, 651600c3e5SJian Shen HCLGEVF_RING_RX_OFFSET_REG, 661600c3e5SJian Shen HCLGEVF_RING_RX_FBD_OFFSET_REG, 671600c3e5SJian Shen HCLGEVF_RING_RX_STASH_REG, 681600c3e5SJian Shen HCLGEVF_RING_RX_BD_ERR_REG, 691600c3e5SJian Shen HCLGEVF_RING_TX_ADDR_L_REG, 701600c3e5SJian Shen HCLGEVF_RING_TX_ADDR_H_REG, 711600c3e5SJian Shen HCLGEVF_RING_TX_BD_NUM_REG, 721600c3e5SJian Shen HCLGEVF_RING_TX_PRIORITY_REG, 731600c3e5SJian Shen HCLGEVF_RING_TX_TC_REG, 741600c3e5SJian Shen HCLGEVF_RING_TX_MERGE_EN_REG, 751600c3e5SJian Shen HCLGEVF_RING_TX_TAIL_REG, 761600c3e5SJian Shen HCLGEVF_RING_TX_HEAD_REG, 771600c3e5SJian Shen HCLGEVF_RING_TX_FBD_NUM_REG, 781600c3e5SJian Shen HCLGEVF_RING_TX_OFFSET_REG, 791600c3e5SJian Shen HCLGEVF_RING_TX_EBD_NUM_REG, 801600c3e5SJian Shen HCLGEVF_RING_TX_EBD_OFFSET_REG, 
811600c3e5SJian Shen HCLGEVF_RING_TX_BD_ERR_REG, 821600c3e5SJian Shen HCLGEVF_RING_EN_REG}; 831600c3e5SJian Shen 841600c3e5SJian Shen static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 851600c3e5SJian Shen HCLGEVF_TQP_INTR_GL0_REG, 861600c3e5SJian Shen HCLGEVF_TQP_INTR_GL1_REG, 871600c3e5SJian Shen HCLGEVF_TQP_INTR_GL2_REG, 881600c3e5SJian Shen HCLGEVF_TQP_INTR_RL_REG}; 891600c3e5SJian Shen 909b2f3477SWeihang Li static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 91e2cb1decSSalil Mehta { 92eed9535fSPeng Li if (!handle->client) 93eed9535fSPeng Li return container_of(handle, struct hclgevf_dev, nic); 94eed9535fSPeng Li else if (handle->client->type == HNAE3_CLIENT_ROCE) 95eed9535fSPeng Li return container_of(handle, struct hclgevf_dev, roce); 96eed9535fSPeng Li else 97e2cb1decSSalil Mehta return container_of(handle, struct hclgevf_dev, nic); 98e2cb1decSSalil Mehta } 99e2cb1decSSalil Mehta 100e2cb1decSSalil Mehta static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 101e2cb1decSSalil Mehta { 102b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 103e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 104e2cb1decSSalil Mehta struct hclgevf_desc desc; 105e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 106e2cb1decSSalil Mehta int status; 107e2cb1decSSalil Mehta int i; 108e2cb1decSSalil Mehta 109b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 110b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 111e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, 112e2cb1decSSalil Mehta HCLGEVF_OPC_QUERY_RX_STATUS, 113e2cb1decSSalil Mehta true); 114e2cb1decSSalil Mehta 115e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 116e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 117e2cb1decSSalil Mehta if (status) { 118e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 119e2cb1decSSalil Mehta "Query tqp stat fail, 
status = %d,queue = %d\n", 120e2cb1decSSalil Mehta status, i); 121e2cb1decSSalil Mehta return status; 122e2cb1decSSalil Mehta } 123e2cb1decSSalil Mehta tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 124cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 125e2cb1decSSalil Mehta 126e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 127e2cb1decSSalil Mehta true); 128e2cb1decSSalil Mehta 129e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 130e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 131e2cb1decSSalil Mehta if (status) { 132e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 133e2cb1decSSalil Mehta "Query tqp stat fail, status = %d,queue = %d\n", 134e2cb1decSSalil Mehta status, i); 135e2cb1decSSalil Mehta return status; 136e2cb1decSSalil Mehta } 137e2cb1decSSalil Mehta tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 138cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 139e2cb1decSSalil Mehta } 140e2cb1decSSalil Mehta 141e2cb1decSSalil Mehta return 0; 142e2cb1decSSalil Mehta } 143e2cb1decSSalil Mehta 144e2cb1decSSalil Mehta static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 145e2cb1decSSalil Mehta { 146e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo = &handle->kinfo; 147e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 148e2cb1decSSalil Mehta u64 *buff = data; 149e2cb1decSSalil Mehta int i; 150e2cb1decSSalil Mehta 151b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 152b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 153e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 154e2cb1decSSalil Mehta } 155e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 156b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 157e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 158e2cb1decSSalil Mehta } 159e2cb1decSSalil Mehta 160e2cb1decSSalil Mehta return buff; 161e2cb1decSSalil Mehta } 
162e2cb1decSSalil Mehta 163e2cb1decSSalil Mehta static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 164e2cb1decSSalil Mehta { 165b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 166e2cb1decSSalil Mehta 167b4f1d303SJian Shen return kinfo->num_tqps * 2; 168e2cb1decSSalil Mehta } 169e2cb1decSSalil Mehta 170e2cb1decSSalil Mehta static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 171e2cb1decSSalil Mehta { 172b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 173e2cb1decSSalil Mehta u8 *buff = data; 174e2cb1decSSalil Mehta int i = 0; 175e2cb1decSSalil Mehta 176b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 177b4f1d303SJian Shen struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 178e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1790c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 180e2cb1decSSalil Mehta tqp->index); 181e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 182e2cb1decSSalil Mehta } 183e2cb1decSSalil Mehta 184b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 185b4f1d303SJian Shen struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 186e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1870c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 188e2cb1decSSalil Mehta tqp->index); 189e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 190e2cb1decSSalil Mehta } 191e2cb1decSSalil Mehta 192e2cb1decSSalil Mehta return buff; 193e2cb1decSSalil Mehta } 194e2cb1decSSalil Mehta 195e2cb1decSSalil Mehta static void hclgevf_update_stats(struct hnae3_handle *handle, 196e2cb1decSSalil Mehta struct net_device_stats *net_stats) 197e2cb1decSSalil Mehta { 198e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 199e2cb1decSSalil Mehta int status; 200e2cb1decSSalil Mehta 201e2cb1decSSalil Mehta status = hclgevf_tqps_update_stats(handle); 202e2cb1decSSalil Mehta if (status) 203e2cb1decSSalil Mehta 
dev_err(&hdev->pdev->dev, 204e2cb1decSSalil Mehta "VF update of TQPS stats fail, status = %d.\n", 205e2cb1decSSalil Mehta status); 206e2cb1decSSalil Mehta } 207e2cb1decSSalil Mehta 208e2cb1decSSalil Mehta static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 209e2cb1decSSalil Mehta { 210e2cb1decSSalil Mehta if (strset == ETH_SS_TEST) 211e2cb1decSSalil Mehta return -EOPNOTSUPP; 212e2cb1decSSalil Mehta else if (strset == ETH_SS_STATS) 213e2cb1decSSalil Mehta return hclgevf_tqps_get_sset_count(handle, strset); 214e2cb1decSSalil Mehta 215e2cb1decSSalil Mehta return 0; 216e2cb1decSSalil Mehta } 217e2cb1decSSalil Mehta 218e2cb1decSSalil Mehta static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 219e2cb1decSSalil Mehta u8 *data) 220e2cb1decSSalil Mehta { 221e2cb1decSSalil Mehta u8 *p = (char *)data; 222e2cb1decSSalil Mehta 223e2cb1decSSalil Mehta if (strset == ETH_SS_STATS) 224e2cb1decSSalil Mehta p = hclgevf_tqps_get_strings(handle, p); 225e2cb1decSSalil Mehta } 226e2cb1decSSalil Mehta 227e2cb1decSSalil Mehta static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 228e2cb1decSSalil Mehta { 229e2cb1decSSalil Mehta hclgevf_tqps_get_stats(handle, data); 230e2cb1decSSalil Mehta } 231e2cb1decSSalil Mehta 232d3410018SYufeng Mo static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, 233d3410018SYufeng Mo u8 subcode) 234d3410018SYufeng Mo { 235d3410018SYufeng Mo if (msg) { 236d3410018SYufeng Mo memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); 237d3410018SYufeng Mo msg->code = code; 238d3410018SYufeng Mo msg->subcode = subcode; 239d3410018SYufeng Mo } 240d3410018SYufeng Mo } 241d3410018SYufeng Mo 242e2cb1decSSalil Mehta static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 243e2cb1decSSalil Mehta { 244d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 245e2cb1decSSalil Mehta u8 resp_msg; 246e2cb1decSSalil Mehta int status; 247e2cb1decSSalil Mehta 248d3410018SYufeng Mo 
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0); 249d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 250d3410018SYufeng Mo sizeof(resp_msg)); 251e2cb1decSSalil Mehta if (status) { 252e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 253e2cb1decSSalil Mehta "VF request to get TC info from PF failed %d", 254e2cb1decSSalil Mehta status); 255e2cb1decSSalil Mehta return status; 256e2cb1decSSalil Mehta } 257e2cb1decSSalil Mehta 258e2cb1decSSalil Mehta hdev->hw_tc_map = resp_msg; 259e2cb1decSSalil Mehta 260e2cb1decSSalil Mehta return 0; 261e2cb1decSSalil Mehta } 262e2cb1decSSalil Mehta 26392f11ea1SJian Shen static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 26492f11ea1SJian Shen { 26592f11ea1SJian Shen struct hnae3_handle *nic = &hdev->nic; 266d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 26792f11ea1SJian Shen u8 resp_msg; 26892f11ea1SJian Shen int ret; 26992f11ea1SJian Shen 270d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 271d3410018SYufeng Mo HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); 272d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 273d3410018SYufeng Mo sizeof(u8)); 27492f11ea1SJian Shen if (ret) { 27592f11ea1SJian Shen dev_err(&hdev->pdev->dev, 27692f11ea1SJian Shen "VF request to get port based vlan state failed %d", 27792f11ea1SJian Shen ret); 27892f11ea1SJian Shen return ret; 27992f11ea1SJian Shen } 28092f11ea1SJian Shen 28192f11ea1SJian Shen nic->port_base_vlan_state = resp_msg; 28292f11ea1SJian Shen 28392f11ea1SJian Shen return 0; 28492f11ea1SJian Shen } 28592f11ea1SJian Shen 2866cee6fc3SJian Shen static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 287e2cb1decSSalil Mehta { 288c0425944SPeng Li #define HCLGEVF_TQPS_RSS_INFO_LEN 6 289d3410018SYufeng Mo #define HCLGEVF_TQPS_ALLOC_OFFSET 0 290d3410018SYufeng Mo #define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 291d3410018SYufeng Mo #define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 
292d3410018SYufeng Mo 293e2cb1decSSalil Mehta u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 294d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 295e2cb1decSSalil Mehta int status; 296e2cb1decSSalil Mehta 297d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); 298d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 299e2cb1decSSalil Mehta HCLGEVF_TQPS_RSS_INFO_LEN); 300e2cb1decSSalil Mehta if (status) { 301e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 302e2cb1decSSalil Mehta "VF request to get tqp info from PF failed %d", 303e2cb1decSSalil Mehta status); 304e2cb1decSSalil Mehta return status; 305e2cb1decSSalil Mehta } 306e2cb1decSSalil Mehta 307d3410018SYufeng Mo memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], 308d3410018SYufeng Mo sizeof(u16)); 309d3410018SYufeng Mo memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], 310d3410018SYufeng Mo sizeof(u16)); 311d3410018SYufeng Mo memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], 312d3410018SYufeng Mo sizeof(u16)); 313c0425944SPeng Li 314c0425944SPeng Li return 0; 315c0425944SPeng Li } 316c0425944SPeng Li 317c0425944SPeng Li static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 318c0425944SPeng Li { 319c0425944SPeng Li #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 320d3410018SYufeng Mo #define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 321d3410018SYufeng Mo #define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 322d3410018SYufeng Mo 323c0425944SPeng Li u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 324d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 325c0425944SPeng Li int ret; 326c0425944SPeng Li 327d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); 328d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 329c0425944SPeng Li HCLGEVF_TQPS_DEPTH_INFO_LEN); 330c0425944SPeng Li if (ret) { 331c0425944SPeng Li dev_err(&hdev->pdev->dev, 332c0425944SPeng Li "VF request to get tqp depth info 
from PF failed %d", 333c0425944SPeng Li ret); 334c0425944SPeng Li return ret; 335c0425944SPeng Li } 336c0425944SPeng Li 337d3410018SYufeng Mo memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], 338d3410018SYufeng Mo sizeof(u16)); 339d3410018SYufeng Mo memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], 340d3410018SYufeng Mo sizeof(u16)); 341e2cb1decSSalil Mehta 342e2cb1decSSalil Mehta return 0; 343e2cb1decSSalil Mehta } 344e2cb1decSSalil Mehta 3450c29d191Sliuzhongzhu static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 3460c29d191Sliuzhongzhu { 3470c29d191Sliuzhongzhu struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 348d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 3490c29d191Sliuzhongzhu u16 qid_in_pf = 0; 350d3410018SYufeng Mo u8 resp_data[2]; 3510c29d191Sliuzhongzhu int ret; 3520c29d191Sliuzhongzhu 353d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); 354d3410018SYufeng Mo memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 355d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, 35663cbf7a9SYufeng Mo sizeof(resp_data)); 3570c29d191Sliuzhongzhu if (!ret) 3580c29d191Sliuzhongzhu qid_in_pf = *(u16 *)resp_data; 3590c29d191Sliuzhongzhu 3600c29d191Sliuzhongzhu return qid_in_pf; 3610c29d191Sliuzhongzhu } 3620c29d191Sliuzhongzhu 3639c3e7130Sliuzhongzhu static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 3649c3e7130Sliuzhongzhu { 365d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 36688d10bd6SJian Shen u8 resp_msg[2]; 3679c3e7130Sliuzhongzhu int ret; 3689c3e7130Sliuzhongzhu 369d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); 370d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 371d3410018SYufeng Mo sizeof(resp_msg)); 3729c3e7130Sliuzhongzhu if (ret) { 3739c3e7130Sliuzhongzhu dev_err(&hdev->pdev->dev, 3749c3e7130Sliuzhongzhu "VF request to get the pf port media 
type failed %d", 3759c3e7130Sliuzhongzhu ret); 3769c3e7130Sliuzhongzhu return ret; 3779c3e7130Sliuzhongzhu } 3789c3e7130Sliuzhongzhu 37988d10bd6SJian Shen hdev->hw.mac.media_type = resp_msg[0]; 38088d10bd6SJian Shen hdev->hw.mac.module_type = resp_msg[1]; 3819c3e7130Sliuzhongzhu 3829c3e7130Sliuzhongzhu return 0; 3839c3e7130Sliuzhongzhu } 3849c3e7130Sliuzhongzhu 385e2cb1decSSalil Mehta static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 386e2cb1decSSalil Mehta { 387e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 388e2cb1decSSalil Mehta int i; 389e2cb1decSSalil Mehta 390e2cb1decSSalil Mehta hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 391e2cb1decSSalil Mehta sizeof(struct hclgevf_tqp), GFP_KERNEL); 392e2cb1decSSalil Mehta if (!hdev->htqp) 393e2cb1decSSalil Mehta return -ENOMEM; 394e2cb1decSSalil Mehta 395e2cb1decSSalil Mehta tqp = hdev->htqp; 396e2cb1decSSalil Mehta 397e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; i++) { 398e2cb1decSSalil Mehta tqp->dev = &hdev->pdev->dev; 399e2cb1decSSalil Mehta tqp->index = i; 400e2cb1decSSalil Mehta 401e2cb1decSSalil Mehta tqp->q.ae_algo = &ae_algovf; 402e2cb1decSSalil Mehta tqp->q.buf_size = hdev->rx_buf_len; 403c0425944SPeng Li tqp->q.tx_desc_num = hdev->num_tx_desc; 404c0425944SPeng Li tqp->q.rx_desc_num = hdev->num_rx_desc; 405e2cb1decSSalil Mehta tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 406e2cb1decSSalil Mehta i * HCLGEVF_TQP_REG_SIZE; 407e2cb1decSSalil Mehta 408e2cb1decSSalil Mehta tqp++; 409e2cb1decSSalil Mehta } 410e2cb1decSSalil Mehta 411e2cb1decSSalil Mehta return 0; 412e2cb1decSSalil Mehta } 413e2cb1decSSalil Mehta 414e2cb1decSSalil Mehta static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 415e2cb1decSSalil Mehta { 416e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 417e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo; 418e2cb1decSSalil Mehta u16 new_tqps = hdev->num_tqps; 419ebaf1908SWeihang Li unsigned int i; 420e2cb1decSSalil Mehta 421e2cb1decSSalil 
Mehta kinfo = &nic->kinfo; 422e2cb1decSSalil Mehta kinfo->num_tc = 0; 423c0425944SPeng Li kinfo->num_tx_desc = hdev->num_tx_desc; 424c0425944SPeng Li kinfo->num_rx_desc = hdev->num_rx_desc; 425e2cb1decSSalil Mehta kinfo->rx_buf_len = hdev->rx_buf_len; 426e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 427e2cb1decSSalil Mehta if (hdev->hw_tc_map & BIT(i)) 428e2cb1decSSalil Mehta kinfo->num_tc++; 429e2cb1decSSalil Mehta 430e2cb1decSSalil Mehta kinfo->rss_size 431e2cb1decSSalil Mehta = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 432e2cb1decSSalil Mehta new_tqps = kinfo->rss_size * kinfo->num_tc; 433e2cb1decSSalil Mehta kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 434e2cb1decSSalil Mehta 435e2cb1decSSalil Mehta kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 436e2cb1decSSalil Mehta sizeof(struct hnae3_queue *), GFP_KERNEL); 437e2cb1decSSalil Mehta if (!kinfo->tqp) 438e2cb1decSSalil Mehta return -ENOMEM; 439e2cb1decSSalil Mehta 440e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 441e2cb1decSSalil Mehta hdev->htqp[i].q.handle = &hdev->nic; 442e2cb1decSSalil Mehta hdev->htqp[i].q.tqp_index = i; 443e2cb1decSSalil Mehta kinfo->tqp[i] = &hdev->htqp[i].q; 444e2cb1decSSalil Mehta } 445e2cb1decSSalil Mehta 446580a05f9SYonglong Liu /* after init the max rss_size and tqps, adjust the default tqp numbers 447580a05f9SYonglong Liu * and rss size with the actual vector numbers 448580a05f9SYonglong Liu */ 449580a05f9SYonglong Liu kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 450580a05f9SYonglong Liu kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc, 451580a05f9SYonglong Liu kinfo->rss_size); 452580a05f9SYonglong Liu 453e2cb1decSSalil Mehta return 0; 454e2cb1decSSalil Mehta } 455e2cb1decSSalil Mehta 456e2cb1decSSalil Mehta static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 457e2cb1decSSalil Mehta { 458d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 459e2cb1decSSalil Mehta 
int status; 460e2cb1decSSalil Mehta 461d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); 462d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 463e2cb1decSSalil Mehta if (status) 464e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 465e2cb1decSSalil Mehta "VF failed to fetch link status(%d) from PF", status); 466e2cb1decSSalil Mehta } 467e2cb1decSSalil Mehta 468e2cb1decSSalil Mehta void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 469e2cb1decSSalil Mehta { 47045e92b7eSPeng Li struct hnae3_handle *rhandle = &hdev->roce; 471e2cb1decSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 47245e92b7eSPeng Li struct hnae3_client *rclient; 473e2cb1decSSalil Mehta struct hnae3_client *client; 474e2cb1decSSalil Mehta 475ff200099SYunsheng Lin if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 476ff200099SYunsheng Lin return; 477ff200099SYunsheng Lin 478e2cb1decSSalil Mehta client = handle->client; 47945e92b7eSPeng Li rclient = hdev->roce_client; 480e2cb1decSSalil Mehta 481582d37bbSPeng Li link_state = 482582d37bbSPeng Li test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; 483582d37bbSPeng Li 484e2cb1decSSalil Mehta if (link_state != hdev->hw.mac.link) { 485e2cb1decSSalil Mehta client->ops->link_status_change(handle, !!link_state); 48645e92b7eSPeng Li if (rclient && rclient->ops->link_status_change) 48745e92b7eSPeng Li rclient->ops->link_status_change(rhandle, !!link_state); 488e2cb1decSSalil Mehta hdev->hw.mac.link = link_state; 489e2cb1decSSalil Mehta } 490ff200099SYunsheng Lin 491ff200099SYunsheng Lin clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 492e2cb1decSSalil Mehta } 493e2cb1decSSalil Mehta 494538abaf3SYueHaibing static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 4959194d18bSliuzhongzhu { 4969194d18bSliuzhongzhu #define HCLGEVF_ADVERTISING 0 4979194d18bSliuzhongzhu #define HCLGEVF_SUPPORTED 1 4989194d18bSliuzhongzhu 499d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 500d3410018SYufeng Mo 501d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 502d3410018SYufeng Mo send_msg.data[0] = HCLGEVF_ADVERTISING; 503d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 504d3410018SYufeng Mo send_msg.data[0] = HCLGEVF_SUPPORTED; 505d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 5069194d18bSliuzhongzhu } 5079194d18bSliuzhongzhu 508e2cb1decSSalil Mehta static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 509e2cb1decSSalil Mehta { 510e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 511e2cb1decSSalil Mehta int ret; 512e2cb1decSSalil Mehta 513e2cb1decSSalil Mehta nic->ae_algo = &ae_algovf; 514e2cb1decSSalil Mehta nic->pdev = hdev->pdev; 515e2cb1decSSalil Mehta nic->numa_node_mask = hdev->numa_node_mask; 516424eb834SSalil Mehta nic->flags |= HNAE3_SUPPORT_VF; 517e2cb1decSSalil Mehta 518e2cb1decSSalil Mehta ret = hclgevf_knic_setup(hdev); 519e2cb1decSSalil Mehta if (ret) 520e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 521e2cb1decSSalil Mehta ret); 522e2cb1decSSalil Mehta return 
ret; 523e2cb1decSSalil Mehta } 524e2cb1decSSalil Mehta 525e2cb1decSSalil Mehta static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 526e2cb1decSSalil Mehta { 52736cbbdf6SPeng Li if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 52836cbbdf6SPeng Li dev_warn(&hdev->pdev->dev, 52936cbbdf6SPeng Li "vector(vector_id %d) has been freed.\n", vector_id); 53036cbbdf6SPeng Li return; 53136cbbdf6SPeng Li } 53236cbbdf6SPeng Li 533e2cb1decSSalil Mehta hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 534e2cb1decSSalil Mehta hdev->num_msi_left += 1; 535e2cb1decSSalil Mehta hdev->num_msi_used -= 1; 536e2cb1decSSalil Mehta } 537e2cb1decSSalil Mehta 538e2cb1decSSalil Mehta static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 539e2cb1decSSalil Mehta struct hnae3_vector_info *vector_info) 540e2cb1decSSalil Mehta { 541e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 542e2cb1decSSalil Mehta struct hnae3_vector_info *vector = vector_info; 543e2cb1decSSalil Mehta int alloc = 0; 544e2cb1decSSalil Mehta int i, j; 545e2cb1decSSalil Mehta 546580a05f9SYonglong Liu vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 547e2cb1decSSalil Mehta vector_num = min(hdev->num_msi_left, vector_num); 548e2cb1decSSalil Mehta 549e2cb1decSSalil Mehta for (j = 0; j < vector_num; j++) { 550e2cb1decSSalil Mehta for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 551e2cb1decSSalil Mehta if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 552e2cb1decSSalil Mehta vector->vector = pci_irq_vector(hdev->pdev, i); 553e2cb1decSSalil Mehta vector->io_addr = hdev->hw.io_base + 554e2cb1decSSalil Mehta HCLGEVF_VECTOR_REG_BASE + 555e2cb1decSSalil Mehta (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 556e2cb1decSSalil Mehta hdev->vector_status[i] = 0; 557e2cb1decSSalil Mehta hdev->vector_irq[i] = vector->vector; 558e2cb1decSSalil Mehta 559e2cb1decSSalil Mehta vector++; 560e2cb1decSSalil Mehta alloc++; 561e2cb1decSSalil Mehta 
562e2cb1decSSalil Mehta break; 563e2cb1decSSalil Mehta } 564e2cb1decSSalil Mehta } 565e2cb1decSSalil Mehta } 566e2cb1decSSalil Mehta hdev->num_msi_left -= alloc; 567e2cb1decSSalil Mehta hdev->num_msi_used += alloc; 568e2cb1decSSalil Mehta 569e2cb1decSSalil Mehta return alloc; 570e2cb1decSSalil Mehta } 571e2cb1decSSalil Mehta 572e2cb1decSSalil Mehta static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 573e2cb1decSSalil Mehta { 574e2cb1decSSalil Mehta int i; 575e2cb1decSSalil Mehta 576e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++) 577e2cb1decSSalil Mehta if (vector == hdev->vector_irq[i]) 578e2cb1decSSalil Mehta return i; 579e2cb1decSSalil Mehta 580e2cb1decSSalil Mehta return -EINVAL; 581e2cb1decSSalil Mehta } 582e2cb1decSSalil Mehta 583374ad291SJian Shen static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 584374ad291SJian Shen const u8 hfunc, const u8 *key) 585374ad291SJian Shen { 586374ad291SJian Shen struct hclgevf_rss_config_cmd *req; 587ebaf1908SWeihang Li unsigned int key_offset = 0; 588374ad291SJian Shen struct hclgevf_desc desc; 5893caf772bSYufeng Mo int key_counts; 590374ad291SJian Shen int key_size; 591374ad291SJian Shen int ret; 592374ad291SJian Shen 5933caf772bSYufeng Mo key_counts = HCLGEVF_RSS_KEY_SIZE; 594374ad291SJian Shen req = (struct hclgevf_rss_config_cmd *)desc.data; 595374ad291SJian Shen 5963caf772bSYufeng Mo while (key_counts) { 597374ad291SJian Shen hclgevf_cmd_setup_basic_desc(&desc, 598374ad291SJian Shen HCLGEVF_OPC_RSS_GENERIC_CONFIG, 599374ad291SJian Shen false); 600374ad291SJian Shen 601374ad291SJian Shen req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 602374ad291SJian Shen req->hash_config |= 603374ad291SJian Shen (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 604374ad291SJian Shen 6053caf772bSYufeng Mo key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); 606374ad291SJian Shen memcpy(req->hash_key, 607374ad291SJian Shen key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 
608374ad291SJian Shen 6093caf772bSYufeng Mo key_counts -= key_size; 6103caf772bSYufeng Mo key_offset++; 611374ad291SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 612374ad291SJian Shen if (ret) { 613374ad291SJian Shen dev_err(&hdev->pdev->dev, 614374ad291SJian Shen "Configure RSS config fail, status = %d\n", 615374ad291SJian Shen ret); 616374ad291SJian Shen return ret; 617374ad291SJian Shen } 618374ad291SJian Shen } 619374ad291SJian Shen 620374ad291SJian Shen return 0; 621374ad291SJian Shen } 622374ad291SJian Shen 623e2cb1decSSalil Mehta static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 624e2cb1decSSalil Mehta { 625e2cb1decSSalil Mehta return HCLGEVF_RSS_KEY_SIZE; 626e2cb1decSSalil Mehta } 627e2cb1decSSalil Mehta 628e2cb1decSSalil Mehta static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 629e2cb1decSSalil Mehta { 630e2cb1decSSalil Mehta return HCLGEVF_RSS_IND_TBL_SIZE; 631e2cb1decSSalil Mehta } 632e2cb1decSSalil Mehta 633e2cb1decSSalil Mehta static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 634e2cb1decSSalil Mehta { 635e2cb1decSSalil Mehta const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 636e2cb1decSSalil Mehta struct hclgevf_rss_indirection_table_cmd *req; 637e2cb1decSSalil Mehta struct hclgevf_desc desc; 638e2cb1decSSalil Mehta int status; 639e2cb1decSSalil Mehta int i, j; 640e2cb1decSSalil Mehta 641e2cb1decSSalil Mehta req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 642e2cb1decSSalil Mehta 643e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 644e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 645e2cb1decSSalil Mehta false); 646e2cb1decSSalil Mehta req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 647e2cb1decSSalil Mehta req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 648e2cb1decSSalil Mehta for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 649e2cb1decSSalil Mehta req->rss_result[j] = 650e2cb1decSSalil Mehta indir[i * 
HCLGEVF_RSS_CFG_TBL_SIZE + j]; 651e2cb1decSSalil Mehta 652e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 653e2cb1decSSalil Mehta if (status) { 654e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 655e2cb1decSSalil Mehta "VF failed(=%d) to set RSS indirection table\n", 656e2cb1decSSalil Mehta status); 657e2cb1decSSalil Mehta return status; 658e2cb1decSSalil Mehta } 659e2cb1decSSalil Mehta } 660e2cb1decSSalil Mehta 661e2cb1decSSalil Mehta return 0; 662e2cb1decSSalil Mehta } 663e2cb1decSSalil Mehta 664e2cb1decSSalil Mehta static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 665e2cb1decSSalil Mehta { 666e2cb1decSSalil Mehta struct hclgevf_rss_tc_mode_cmd *req; 667e2cb1decSSalil Mehta u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 668e2cb1decSSalil Mehta u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 669e2cb1decSSalil Mehta u16 tc_size[HCLGEVF_MAX_TC_NUM]; 670e2cb1decSSalil Mehta struct hclgevf_desc desc; 671e2cb1decSSalil Mehta u16 roundup_size; 672e2cb1decSSalil Mehta int status; 673ebaf1908SWeihang Li unsigned int i; 674e2cb1decSSalil Mehta 675e2cb1decSSalil Mehta req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 676e2cb1decSSalil Mehta 677e2cb1decSSalil Mehta roundup_size = roundup_pow_of_two(rss_size); 678e2cb1decSSalil Mehta roundup_size = ilog2(roundup_size); 679e2cb1decSSalil Mehta 680e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 681e2cb1decSSalil Mehta tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 682e2cb1decSSalil Mehta tc_size[i] = roundup_size; 683e2cb1decSSalil Mehta tc_offset[i] = rss_size * i; 684e2cb1decSSalil Mehta } 685e2cb1decSSalil Mehta 686e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 687e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 688e4e87715SPeng Li hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 689e2cb1decSSalil Mehta (tc_valid[i] & 0x1)); 690e4e87715SPeng Li hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 691e2cb1decSSalil 
Mehta HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 692e4e87715SPeng Li hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 693e2cb1decSSalil Mehta HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 694e2cb1decSSalil Mehta } 695e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 696e2cb1decSSalil Mehta if (status) 697e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 698e2cb1decSSalil Mehta "VF failed(=%d) to set rss tc mode\n", status); 699e2cb1decSSalil Mehta 700e2cb1decSSalil Mehta return status; 701e2cb1decSSalil Mehta } 702e2cb1decSSalil Mehta 703a638b1d8SJian Shen /* for revision 0x20, vf shared the same rss config with pf */ 704a638b1d8SJian Shen static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 705a638b1d8SJian Shen { 706a638b1d8SJian Shen #define HCLGEVF_RSS_MBX_RESP_LEN 8 707a638b1d8SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 708a638b1d8SJian Shen u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 709d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 710a638b1d8SJian Shen u16 msg_num, hash_key_index; 711a638b1d8SJian Shen u8 index; 712a638b1d8SJian Shen int ret; 713a638b1d8SJian Shen 714d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 715a638b1d8SJian Shen msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 716a638b1d8SJian Shen HCLGEVF_RSS_MBX_RESP_LEN; 717a638b1d8SJian Shen for (index = 0; index < msg_num; index++) { 718d3410018SYufeng Mo send_msg.data[0] = index; 719d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 720a638b1d8SJian Shen HCLGEVF_RSS_MBX_RESP_LEN); 721a638b1d8SJian Shen if (ret) { 722a638b1d8SJian Shen dev_err(&hdev->pdev->dev, 723a638b1d8SJian Shen "VF get rss hash key from PF failed, ret=%d", 724a638b1d8SJian Shen ret); 725a638b1d8SJian Shen return ret; 726a638b1d8SJian Shen } 727a638b1d8SJian Shen 728a638b1d8SJian Shen hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 729a638b1d8SJian Shen if (index == msg_num - 1) 730a638b1d8SJian Shen 
memcpy(&rss_cfg->rss_hash_key[hash_key_index], 731a638b1d8SJian Shen &resp_msg[0], 732a638b1d8SJian Shen HCLGEVF_RSS_KEY_SIZE - hash_key_index); 733a638b1d8SJian Shen else 734a638b1d8SJian Shen memcpy(&rss_cfg->rss_hash_key[hash_key_index], 735a638b1d8SJian Shen &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 736a638b1d8SJian Shen } 737a638b1d8SJian Shen 738a638b1d8SJian Shen return 0; 739a638b1d8SJian Shen } 740a638b1d8SJian Shen 741e2cb1decSSalil Mehta static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 742e2cb1decSSalil Mehta u8 *hfunc) 743e2cb1decSSalil Mehta { 744e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 745e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 746a638b1d8SJian Shen int i, ret; 747e2cb1decSSalil Mehta 748374ad291SJian Shen if (handle->pdev->revision >= 0x21) { 749374ad291SJian Shen /* Get hash algorithm */ 750374ad291SJian Shen if (hfunc) { 751374ad291SJian Shen switch (rss_cfg->hash_algo) { 752374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 753374ad291SJian Shen *hfunc = ETH_RSS_HASH_TOP; 754374ad291SJian Shen break; 755374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 756374ad291SJian Shen *hfunc = ETH_RSS_HASH_XOR; 757374ad291SJian Shen break; 758374ad291SJian Shen default: 759374ad291SJian Shen *hfunc = ETH_RSS_HASH_UNKNOWN; 760374ad291SJian Shen break; 761374ad291SJian Shen } 762374ad291SJian Shen } 763374ad291SJian Shen 764374ad291SJian Shen /* Get the RSS Key required by the user */ 765374ad291SJian Shen if (key) 766374ad291SJian Shen memcpy(key, rss_cfg->rss_hash_key, 767374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 768a638b1d8SJian Shen } else { 769a638b1d8SJian Shen if (hfunc) 770a638b1d8SJian Shen *hfunc = ETH_RSS_HASH_TOP; 771a638b1d8SJian Shen if (key) { 772a638b1d8SJian Shen ret = hclgevf_get_rss_hash_key(hdev); 773a638b1d8SJian Shen if (ret) 774a638b1d8SJian Shen return ret; 775a638b1d8SJian Shen memcpy(key, rss_cfg->rss_hash_key, 776a638b1d8SJian Shen 
HCLGEVF_RSS_KEY_SIZE); 777a638b1d8SJian Shen } 778374ad291SJian Shen } 779374ad291SJian Shen 780e2cb1decSSalil Mehta if (indir) 781e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 782e2cb1decSSalil Mehta indir[i] = rss_cfg->rss_indirection_tbl[i]; 783e2cb1decSSalil Mehta 784374ad291SJian Shen return 0; 785e2cb1decSSalil Mehta } 786e2cb1decSSalil Mehta 787e2cb1decSSalil Mehta static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 788e2cb1decSSalil Mehta const u8 *key, const u8 hfunc) 789e2cb1decSSalil Mehta { 790e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 791e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 792374ad291SJian Shen int ret, i; 793374ad291SJian Shen 794374ad291SJian Shen if (handle->pdev->revision >= 0x21) { 795374ad291SJian Shen /* Set the RSS Hash Key if specififed by the user */ 796374ad291SJian Shen if (key) { 797374ad291SJian Shen switch (hfunc) { 798374ad291SJian Shen case ETH_RSS_HASH_TOP: 799374ad291SJian Shen rss_cfg->hash_algo = 800374ad291SJian Shen HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 801374ad291SJian Shen break; 802374ad291SJian Shen case ETH_RSS_HASH_XOR: 803374ad291SJian Shen rss_cfg->hash_algo = 804374ad291SJian Shen HCLGEVF_RSS_HASH_ALGO_SIMPLE; 805374ad291SJian Shen break; 806374ad291SJian Shen case ETH_RSS_HASH_NO_CHANGE: 807374ad291SJian Shen break; 808374ad291SJian Shen default: 809374ad291SJian Shen return -EINVAL; 810374ad291SJian Shen } 811374ad291SJian Shen 812374ad291SJian Shen ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 813374ad291SJian Shen key); 814374ad291SJian Shen if (ret) 815374ad291SJian Shen return ret; 816374ad291SJian Shen 817374ad291SJian Shen /* Update the shadow RSS key with user specified qids */ 818374ad291SJian Shen memcpy(rss_cfg->rss_hash_key, key, 819374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 820374ad291SJian Shen } 821374ad291SJian Shen } 822e2cb1decSSalil Mehta 823e2cb1decSSalil Mehta /* update the shadow RSS 
table with user specified qids */ 824e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 825e2cb1decSSalil Mehta rss_cfg->rss_indirection_tbl[i] = indir[i]; 826e2cb1decSSalil Mehta 827e2cb1decSSalil Mehta /* update the hardware */ 828e2cb1decSSalil Mehta return hclgevf_set_rss_indir_table(hdev); 829e2cb1decSSalil Mehta } 830e2cb1decSSalil Mehta 831d97b3072SJian Shen static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 832d97b3072SJian Shen { 833d97b3072SJian Shen u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 834d97b3072SJian Shen 835d97b3072SJian Shen if (nfc->data & RXH_L4_B_2_3) 836d97b3072SJian Shen hash_sets |= HCLGEVF_D_PORT_BIT; 837d97b3072SJian Shen else 838d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_PORT_BIT; 839d97b3072SJian Shen 840d97b3072SJian Shen if (nfc->data & RXH_IP_SRC) 841d97b3072SJian Shen hash_sets |= HCLGEVF_S_IP_BIT; 842d97b3072SJian Shen else 843d97b3072SJian Shen hash_sets &= ~HCLGEVF_S_IP_BIT; 844d97b3072SJian Shen 845d97b3072SJian Shen if (nfc->data & RXH_IP_DST) 846d97b3072SJian Shen hash_sets |= HCLGEVF_D_IP_BIT; 847d97b3072SJian Shen else 848d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_IP_BIT; 849d97b3072SJian Shen 850d97b3072SJian Shen if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 851d97b3072SJian Shen hash_sets |= HCLGEVF_V_TAG_BIT; 852d97b3072SJian Shen 853d97b3072SJian Shen return hash_sets; 854d97b3072SJian Shen } 855d97b3072SJian Shen 856d97b3072SJian Shen static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 857d97b3072SJian Shen struct ethtool_rxnfc *nfc) 858d97b3072SJian Shen { 859d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 860d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 861d97b3072SJian Shen struct hclgevf_rss_input_tuple_cmd *req; 862d97b3072SJian Shen struct hclgevf_desc desc; 863d97b3072SJian Shen u8 tuple_sets; 864d97b3072SJian Shen int ret; 865d97b3072SJian Shen 866d97b3072SJian Shen if 
(handle->pdev->revision == 0x20) 867d97b3072SJian Shen return -EOPNOTSUPP; 868d97b3072SJian Shen 869d97b3072SJian Shen if (nfc->data & 870d97b3072SJian Shen ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 871d97b3072SJian Shen return -EINVAL; 872d97b3072SJian Shen 873d97b3072SJian Shen req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 874d97b3072SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 875d97b3072SJian Shen 876d97b3072SJian Shen req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 877d97b3072SJian Shen req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 878d97b3072SJian Shen req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 879d97b3072SJian Shen req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 880d97b3072SJian Shen req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 881d97b3072SJian Shen req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 882d97b3072SJian Shen req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 883d97b3072SJian Shen req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 884d97b3072SJian Shen 885d97b3072SJian Shen tuple_sets = hclgevf_get_rss_hash_bits(nfc); 886d97b3072SJian Shen switch (nfc->flow_type) { 887d97b3072SJian Shen case TCP_V4_FLOW: 888d97b3072SJian Shen req->ipv4_tcp_en = tuple_sets; 889d97b3072SJian Shen break; 890d97b3072SJian Shen case TCP_V6_FLOW: 891d97b3072SJian Shen req->ipv6_tcp_en = tuple_sets; 892d97b3072SJian Shen break; 893d97b3072SJian Shen case UDP_V4_FLOW: 894d97b3072SJian Shen req->ipv4_udp_en = tuple_sets; 895d97b3072SJian Shen break; 896d97b3072SJian Shen case UDP_V6_FLOW: 897d97b3072SJian Shen req->ipv6_udp_en = tuple_sets; 898d97b3072SJian Shen break; 899d97b3072SJian Shen case SCTP_V4_FLOW: 900d97b3072SJian Shen req->ipv4_sctp_en = tuple_sets; 901d97b3072SJian Shen break; 902d97b3072SJian Shen case SCTP_V6_FLOW: 903d97b3072SJian Shen if ((nfc->data & RXH_L4_B_0_1) || 904d97b3072SJian Shen (nfc->data & 
RXH_L4_B_2_3)) 905d97b3072SJian Shen return -EINVAL; 906d97b3072SJian Shen 907d97b3072SJian Shen req->ipv6_sctp_en = tuple_sets; 908d97b3072SJian Shen break; 909d97b3072SJian Shen case IPV4_FLOW: 910d97b3072SJian Shen req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 911d97b3072SJian Shen break; 912d97b3072SJian Shen case IPV6_FLOW: 913d97b3072SJian Shen req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 914d97b3072SJian Shen break; 915d97b3072SJian Shen default: 916d97b3072SJian Shen return -EINVAL; 917d97b3072SJian Shen } 918d97b3072SJian Shen 919d97b3072SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 920d97b3072SJian Shen if (ret) { 921d97b3072SJian Shen dev_err(&hdev->pdev->dev, 922d97b3072SJian Shen "Set rss tuple fail, status = %d\n", ret); 923d97b3072SJian Shen return ret; 924d97b3072SJian Shen } 925d97b3072SJian Shen 926d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 927d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 928d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 929d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 930d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 931d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 932d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 933d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 934d97b3072SJian Shen return 0; 935d97b3072SJian Shen } 936d97b3072SJian Shen 937d97b3072SJian Shen static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 938d97b3072SJian Shen struct ethtool_rxnfc *nfc) 939d97b3072SJian Shen { 940d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 941d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 942d97b3072SJian Shen u8 tuple_sets; 943d97b3072SJian Shen 944d97b3072SJian Shen if (handle->pdev->revision == 
0x20) 945d97b3072SJian Shen return -EOPNOTSUPP; 946d97b3072SJian Shen 947d97b3072SJian Shen nfc->data = 0; 948d97b3072SJian Shen 949d97b3072SJian Shen switch (nfc->flow_type) { 950d97b3072SJian Shen case TCP_V4_FLOW: 951d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 952d97b3072SJian Shen break; 953d97b3072SJian Shen case UDP_V4_FLOW: 954d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 955d97b3072SJian Shen break; 956d97b3072SJian Shen case TCP_V6_FLOW: 957d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 958d97b3072SJian Shen break; 959d97b3072SJian Shen case UDP_V6_FLOW: 960d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 961d97b3072SJian Shen break; 962d97b3072SJian Shen case SCTP_V4_FLOW: 963d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 964d97b3072SJian Shen break; 965d97b3072SJian Shen case SCTP_V6_FLOW: 966d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 967d97b3072SJian Shen break; 968d97b3072SJian Shen case IPV4_FLOW: 969d97b3072SJian Shen case IPV6_FLOW: 970d97b3072SJian Shen tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 971d97b3072SJian Shen break; 972d97b3072SJian Shen default: 973d97b3072SJian Shen return -EINVAL; 974d97b3072SJian Shen } 975d97b3072SJian Shen 976d97b3072SJian Shen if (!tuple_sets) 977d97b3072SJian Shen return 0; 978d97b3072SJian Shen 979d97b3072SJian Shen if (tuple_sets & HCLGEVF_D_PORT_BIT) 980d97b3072SJian Shen nfc->data |= RXH_L4_B_2_3; 981d97b3072SJian Shen if (tuple_sets & HCLGEVF_S_PORT_BIT) 982d97b3072SJian Shen nfc->data |= RXH_L4_B_0_1; 983d97b3072SJian Shen if (tuple_sets & HCLGEVF_D_IP_BIT) 984d97b3072SJian Shen nfc->data |= RXH_IP_DST; 985d97b3072SJian Shen if (tuple_sets & HCLGEVF_S_IP_BIT) 986d97b3072SJian Shen nfc->data |= RXH_IP_SRC; 987d97b3072SJian Shen 988d97b3072SJian Shen return 0; 989d97b3072SJian Shen } 990d97b3072SJian Shen 991d97b3072SJian Shen static int 
hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 992d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg) 993d97b3072SJian Shen { 994d97b3072SJian Shen struct hclgevf_rss_input_tuple_cmd *req; 995d97b3072SJian Shen struct hclgevf_desc desc; 996d97b3072SJian Shen int ret; 997d97b3072SJian Shen 998d97b3072SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 999d97b3072SJian Shen 1000d97b3072SJian Shen req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 1001d97b3072SJian Shen 1002d97b3072SJian Shen req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 1003d97b3072SJian Shen req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 1004d97b3072SJian Shen req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 1005d97b3072SJian Shen req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 1006d97b3072SJian Shen req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 1007d97b3072SJian Shen req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 1008d97b3072SJian Shen req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 1009d97b3072SJian Shen req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 1010d97b3072SJian Shen 1011d97b3072SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1012d97b3072SJian Shen if (ret) 1013d97b3072SJian Shen dev_err(&hdev->pdev->dev, 1014d97b3072SJian Shen "Configure rss input fail, status = %d\n", ret); 1015d97b3072SJian Shen return ret; 1016d97b3072SJian Shen } 1017d97b3072SJian Shen 1018e2cb1decSSalil Mehta static int hclgevf_get_tc_size(struct hnae3_handle *handle) 1019e2cb1decSSalil Mehta { 1020e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1021e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1022e2cb1decSSalil Mehta 1023e2cb1decSSalil Mehta return rss_cfg->rss_size; 1024e2cb1decSSalil Mehta } 1025e2cb1decSSalil Mehta 1026e2cb1decSSalil Mehta static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 
1027b204bc74SPeng Li int vector_id, 1028e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1029e2cb1decSSalil Mehta { 1030e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1031d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1032e2cb1decSSalil Mehta struct hnae3_ring_chain_node *node; 1033e2cb1decSSalil Mehta int status; 1034d3410018SYufeng Mo int i = 0; 1035e2cb1decSSalil Mehta 1036d3410018SYufeng Mo memset(&send_msg, 0, sizeof(send_msg)); 1037d3410018SYufeng Mo send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 1038c09ba484SPeng Li HCLGE_MBX_UNMAP_RING_TO_VECTOR; 1039d3410018SYufeng Mo send_msg.vector_id = vector_id; 1040e2cb1decSSalil Mehta 1041e2cb1decSSalil Mehta for (node = ring_chain; node; node = node->next) { 1042d3410018SYufeng Mo send_msg.param[i].ring_type = 1043e4e87715SPeng Li hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 1044d3410018SYufeng Mo 1045d3410018SYufeng Mo send_msg.param[i].tqp_index = node->tqp_index; 1046d3410018SYufeng Mo send_msg.param[i].int_gl_index = 1047d3410018SYufeng Mo hnae3_get_field(node->int_gl_idx, 104879eee410SFuyun Liang HNAE3_RING_GL_IDX_M, 104979eee410SFuyun Liang HNAE3_RING_GL_IDX_S); 105079eee410SFuyun Liang 10515d02a58dSYunsheng Lin i++; 1052d3410018SYufeng Mo if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 1053d3410018SYufeng Mo send_msg.ring_num = i; 1054e2cb1decSSalil Mehta 1055d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 1056d3410018SYufeng Mo NULL, 0); 1057e2cb1decSSalil Mehta if (status) { 1058e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1059e2cb1decSSalil Mehta "Map TQP fail, status is %d.\n", 1060e2cb1decSSalil Mehta status); 1061e2cb1decSSalil Mehta return status; 1062e2cb1decSSalil Mehta } 1063e2cb1decSSalil Mehta i = 0; 1064e2cb1decSSalil Mehta } 1065e2cb1decSSalil Mehta } 1066e2cb1decSSalil Mehta 1067e2cb1decSSalil Mehta return 0; 1068e2cb1decSSalil Mehta } 1069e2cb1decSSalil Mehta 1070e2cb1decSSalil Mehta static int 
hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1071e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1072e2cb1decSSalil Mehta { 1073b204bc74SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1074b204bc74SPeng Li int vector_id; 1075b204bc74SPeng Li 1076b204bc74SPeng Li vector_id = hclgevf_get_vector_index(hdev, vector); 1077b204bc74SPeng Li if (vector_id < 0) { 1078b204bc74SPeng Li dev_err(&handle->pdev->dev, 1079b204bc74SPeng Li "Get vector index fail. ret =%d\n", vector_id); 1080b204bc74SPeng Li return vector_id; 1081b204bc74SPeng Li } 1082b204bc74SPeng Li 1083b204bc74SPeng Li return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1084e2cb1decSSalil Mehta } 1085e2cb1decSSalil Mehta 1086e2cb1decSSalil Mehta static int hclgevf_unmap_ring_from_vector( 1087e2cb1decSSalil Mehta struct hnae3_handle *handle, 1088e2cb1decSSalil Mehta int vector, 1089e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1090e2cb1decSSalil Mehta { 1091e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1092e2cb1decSSalil Mehta int ret, vector_id; 1093e2cb1decSSalil Mehta 1094dea846e8SHuazhong Tan if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1095dea846e8SHuazhong Tan return 0; 1096dea846e8SHuazhong Tan 1097e2cb1decSSalil Mehta vector_id = hclgevf_get_vector_index(hdev, vector); 1098e2cb1decSSalil Mehta if (vector_id < 0) { 1099e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 1100e2cb1decSSalil Mehta "Get vector index fail. ret =%d\n", vector_id); 1101e2cb1decSSalil Mehta return vector_id; 1102e2cb1decSSalil Mehta } 1103e2cb1decSSalil Mehta 1104b204bc74SPeng Li ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 11050d3e6631SYunsheng Lin if (ret) 1106e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 1107e2cb1decSSalil Mehta "Unmap ring from vector fail. 
vector=%d, ret =%d\n", 1108e2cb1decSSalil Mehta vector_id, 1109e2cb1decSSalil Mehta ret); 11100d3e6631SYunsheng Lin 1111e2cb1decSSalil Mehta return ret; 1112e2cb1decSSalil Mehta } 1113e2cb1decSSalil Mehta 11140d3e6631SYunsheng Lin static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 11150d3e6631SYunsheng Lin { 11160d3e6631SYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 111703718db9SYunsheng Lin int vector_id; 11180d3e6631SYunsheng Lin 111903718db9SYunsheng Lin vector_id = hclgevf_get_vector_index(hdev, vector); 112003718db9SYunsheng Lin if (vector_id < 0) { 112103718db9SYunsheng Lin dev_err(&handle->pdev->dev, 112203718db9SYunsheng Lin "hclgevf_put_vector get vector index fail. ret =%d\n", 112303718db9SYunsheng Lin vector_id); 112403718db9SYunsheng Lin return vector_id; 112503718db9SYunsheng Lin } 112603718db9SYunsheng Lin 112703718db9SYunsheng Lin hclgevf_free_vector(hdev, vector_id); 1128e2cb1decSSalil Mehta 1129e2cb1decSSalil Mehta return 0; 1130e2cb1decSSalil Mehta } 1131e2cb1decSSalil Mehta 11323b75c3dfSPeng Li static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1133e196ec75SJian Shen bool en_uc_pmc, bool en_mc_pmc, 1134f01f5559SJian Shen bool en_bc_pmc) 1135e2cb1decSSalil Mehta { 1136d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1137f01f5559SJian Shen int ret; 1138e2cb1decSSalil Mehta 1139d3410018SYufeng Mo memset(&send_msg, 0, sizeof(send_msg)); 1140d3410018SYufeng Mo send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 1141d3410018SYufeng Mo send_msg.en_bc = en_bc_pmc ? 1 : 0; 1142d3410018SYufeng Mo send_msg.en_uc = en_uc_pmc ? 1 : 0; 1143d3410018SYufeng Mo send_msg.en_mc = en_mc_pmc ? 
1 : 0; 1144e2cb1decSSalil Mehta 1145d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1146d3410018SYufeng Mo 1147f01f5559SJian Shen if (ret) 1148e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1149f01f5559SJian Shen "Set promisc mode fail, status is %d.\n", ret); 1150e2cb1decSSalil Mehta 1151f01f5559SJian Shen return ret; 1152e2cb1decSSalil Mehta } 1153e2cb1decSSalil Mehta 1154e196ec75SJian Shen static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 1155e196ec75SJian Shen bool en_mc_pmc) 1156e2cb1decSSalil Mehta { 1157e196ec75SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1158e196ec75SJian Shen struct pci_dev *pdev = hdev->pdev; 1159e196ec75SJian Shen bool en_bc_pmc; 1160e196ec75SJian Shen 1161e196ec75SJian Shen en_bc_pmc = pdev->revision != 0x20; 1162e196ec75SJian Shen 1163e196ec75SJian Shen return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 1164e196ec75SJian Shen en_bc_pmc); 1165e2cb1decSSalil Mehta } 1166e2cb1decSSalil Mehta 1167c631c696SJian Shen static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 1168c631c696SJian Shen { 1169c631c696SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1170c631c696SJian Shen 1171c631c696SJian Shen set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1172c631c696SJian Shen } 1173c631c696SJian Shen 1174c631c696SJian Shen static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 1175c631c696SJian Shen { 1176c631c696SJian Shen struct hnae3_handle *handle = &hdev->nic; 1177c631c696SJian Shen bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 1178c631c696SJian Shen bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 1179c631c696SJian Shen int ret; 1180c631c696SJian Shen 1181c631c696SJian Shen if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 1182c631c696SJian Shen ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 1183c631c696SJian Shen if (!ret) 1184c631c696SJian Shen 
clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1185c631c696SJian Shen } 1186c631c696SJian Shen } 1187c631c696SJian Shen 1188ebaf1908SWeihang Li static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, 1189e2cb1decSSalil Mehta int stream_id, bool enable) 1190e2cb1decSSalil Mehta { 1191e2cb1decSSalil Mehta struct hclgevf_cfg_com_tqp_queue_cmd *req; 1192e2cb1decSSalil Mehta struct hclgevf_desc desc; 1193e2cb1decSSalil Mehta int status; 1194e2cb1decSSalil Mehta 1195e2cb1decSSalil Mehta req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1196e2cb1decSSalil Mehta 1197e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1198e2cb1decSSalil Mehta false); 1199e2cb1decSSalil Mehta req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1200e2cb1decSSalil Mehta req->stream_id = cpu_to_le16(stream_id); 1201ebaf1908SWeihang Li if (enable) 1202ebaf1908SWeihang Li req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 1203e2cb1decSSalil Mehta 1204e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1205e2cb1decSSalil Mehta if (status) 1206e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1207e2cb1decSSalil Mehta "TQP enable fail, status =%d.\n", status); 1208e2cb1decSSalil Mehta 1209e2cb1decSSalil Mehta return status; 1210e2cb1decSSalil Mehta } 1211e2cb1decSSalil Mehta 1212e2cb1decSSalil Mehta static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1213e2cb1decSSalil Mehta { 1214b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1215e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 1216e2cb1decSSalil Mehta int i; 1217e2cb1decSSalil Mehta 1218b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 1219b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1220e2cb1decSSalil Mehta memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1221e2cb1decSSalil Mehta } 1222e2cb1decSSalil Mehta } 1223e2cb1decSSalil Mehta 12248e6de441SHuazhong Tan static int 
hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 12258e6de441SHuazhong Tan { 1226d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 12278e6de441SHuazhong Tan u8 host_mac[ETH_ALEN]; 12288e6de441SHuazhong Tan int status; 12298e6de441SHuazhong Tan 1230d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 1231d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 1232d3410018SYufeng Mo ETH_ALEN); 12338e6de441SHuazhong Tan if (status) { 12348e6de441SHuazhong Tan dev_err(&hdev->pdev->dev, 12358e6de441SHuazhong Tan "fail to get VF MAC from host %d", status); 12368e6de441SHuazhong Tan return status; 12378e6de441SHuazhong Tan } 12388e6de441SHuazhong Tan 12398e6de441SHuazhong Tan ether_addr_copy(p, host_mac); 12408e6de441SHuazhong Tan 12418e6de441SHuazhong Tan return 0; 12428e6de441SHuazhong Tan } 12438e6de441SHuazhong Tan 1244e2cb1decSSalil Mehta static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1245e2cb1decSSalil Mehta { 1246e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 12478e6de441SHuazhong Tan u8 host_mac_addr[ETH_ALEN]; 1248e2cb1decSSalil Mehta 12498e6de441SHuazhong Tan if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 12508e6de441SHuazhong Tan return; 12518e6de441SHuazhong Tan 12528e6de441SHuazhong Tan hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 12538e6de441SHuazhong Tan if (hdev->has_pf_mac) 12548e6de441SHuazhong Tan ether_addr_copy(p, host_mac_addr); 12558e6de441SHuazhong Tan else 1256e2cb1decSSalil Mehta ether_addr_copy(p, hdev->hw.mac.mac_addr); 1257e2cb1decSSalil Mehta } 1258e2cb1decSSalil Mehta 125959098055SFuyun Liang static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 126059098055SFuyun Liang bool is_first) 1261e2cb1decSSalil Mehta { 1262e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1263e2cb1decSSalil Mehta u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1264d3410018SYufeng Mo struct 
hclge_vf_to_pf_msg send_msg; 1265e2cb1decSSalil Mehta u8 *new_mac_addr = (u8 *)p; 1266e2cb1decSSalil Mehta int status; 1267e2cb1decSSalil Mehta 1268d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 1269ee4bcd3bSJian Shen send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1270d3410018SYufeng Mo ether_addr_copy(send_msg.data, new_mac_addr); 1271ee4bcd3bSJian Shen if (is_first && !hdev->has_pf_mac) 1272ee4bcd3bSJian Shen eth_zero_addr(&send_msg.data[ETH_ALEN]); 1273ee4bcd3bSJian Shen else 1274d3410018SYufeng Mo ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 1275d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1276e2cb1decSSalil Mehta if (!status) 1277e2cb1decSSalil Mehta ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1278e2cb1decSSalil Mehta 1279e2cb1decSSalil Mehta return status; 1280e2cb1decSSalil Mehta } 1281e2cb1decSSalil Mehta 1282ee4bcd3bSJian Shen static struct hclgevf_mac_addr_node * 1283ee4bcd3bSJian Shen hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 1284ee4bcd3bSJian Shen { 1285ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp; 1286ee4bcd3bSJian Shen 1287ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) 1288ee4bcd3bSJian Shen if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 1289ee4bcd3bSJian Shen return mac_node; 1290ee4bcd3bSJian Shen 1291ee4bcd3bSJian Shen return NULL; 1292ee4bcd3bSJian Shen } 1293ee4bcd3bSJian Shen 1294ee4bcd3bSJian Shen static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 1295ee4bcd3bSJian Shen enum HCLGEVF_MAC_NODE_STATE state) 1296ee4bcd3bSJian Shen { 1297ee4bcd3bSJian Shen switch (state) { 1298ee4bcd3bSJian Shen /* from set_rx_mode or tmp_add_list */ 1299ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_ADD: 1300ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_DEL) 1301ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_ACTIVE; 1302ee4bcd3bSJian Shen break; 1303ee4bcd3bSJian Shen /* 
	 * only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		/* a TO_ADD entry was never written to hardware, so it can be
		 * dropped outright; anything else is marked for deletion.
		 */
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}

/* hclgevf_update_mac_list - record a MAC address add/delete request in the
 * driver's software table (unicast or multicast list, selected by @mac_type).
 * The hardware is not touched here; the periodic sync task pushes the list
 * state to the PF later.
 *
 * Runs under mac_list_lock (BH-disabling spinlock), so the allocation must
 * use GFP_ATOMIC.
 *
 * Return: 0 on success, -ENOENT when asked to delete an address that was
 * never added, -ENOMEM on allocation failure.
 */
static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address is never added, unnecessary to delete */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	/* GFP_ATOMIC: we hold a BH-disabling spinlock here */
	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

/* queue a unicast MAC address for addition (hnae3 ops callback) */
static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

/* queue a unicast MAC address for removal (hnae3 ops callback) */
static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

/* queue a multicast MAC address for addition (hnae3 ops callback) */
static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

/* queue a multicast MAC address for removal (hnae3 ops callback) */
static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

/* hclgevf_add_del_mac_addr - translate one software table node into the
 * matching mailbox request (UC/MC x add/remove) and send it to the PF.
 * The mailbox is sent without waiting for a response (need_resp = false).
 */
static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
1420ee4bcd3bSJian Shen static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, 1421ee4bcd3bSJian Shen struct list_head *list, 1422ee4bcd3bSJian Shen enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1423ee4bcd3bSJian Shen { 1424ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp; 1425ee4bcd3bSJian Shen int ret; 1426ee4bcd3bSJian Shen 1427ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) { 1428ee4bcd3bSJian Shen ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); 1429ee4bcd3bSJian Shen if (ret) { 1430ee4bcd3bSJian Shen dev_err(&hdev->pdev->dev, 1431ee4bcd3bSJian Shen "failed to configure mac %pM, state = %d, ret = %d\n", 1432ee4bcd3bSJian Shen mac_node->mac_addr, mac_node->state, ret); 1433ee4bcd3bSJian Shen return; 1434ee4bcd3bSJian Shen } 1435ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1436ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_ACTIVE; 1437ee4bcd3bSJian Shen } else { 1438ee4bcd3bSJian Shen list_del(&mac_node->node); 1439ee4bcd3bSJian Shen kfree(mac_node); 1440ee4bcd3bSJian Shen } 1441ee4bcd3bSJian Shen } 1442ee4bcd3bSJian Shen } 1443ee4bcd3bSJian Shen 1444ee4bcd3bSJian Shen static void hclgevf_sync_from_add_list(struct list_head *add_list, 1445ee4bcd3bSJian Shen struct list_head *mac_list) 1446ee4bcd3bSJian Shen { 1447ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1448ee4bcd3bSJian Shen 1449ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, add_list, node) { 1450ee4bcd3bSJian Shen /* if the mac address from tmp_add_list is not in the 1451ee4bcd3bSJian Shen * uc/mc_mac_list, it means have received a TO_DEL request 1452ee4bcd3bSJian Shen * during the time window of sending mac config request to PF 1453ee4bcd3bSJian Shen * If mac_node state is ACTIVE, then change its state to TO_DEL, 1454ee4bcd3bSJian Shen * then it will be removed at next time. If is TO_ADD, it means 1455ee4bcd3bSJian Shen * send TO_ADD request failed, so just remove the mac node. 
1456ee4bcd3bSJian Shen */ 1457ee4bcd3bSJian Shen new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1458ee4bcd3bSJian Shen if (new_node) { 1459ee4bcd3bSJian Shen hclgevf_update_mac_node(new_node, mac_node->state); 1460ee4bcd3bSJian Shen list_del(&mac_node->node); 1461ee4bcd3bSJian Shen kfree(mac_node); 1462ee4bcd3bSJian Shen } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { 1463ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_TO_DEL; 1464ee4bcd3bSJian Shen list_del(&mac_node->node); 1465ee4bcd3bSJian Shen list_add_tail(&mac_node->node, mac_list); 1466ee4bcd3bSJian Shen } else { 1467ee4bcd3bSJian Shen list_del(&mac_node->node); 1468ee4bcd3bSJian Shen kfree(mac_node); 1469ee4bcd3bSJian Shen } 1470ee4bcd3bSJian Shen } 1471ee4bcd3bSJian Shen } 1472ee4bcd3bSJian Shen 1473ee4bcd3bSJian Shen static void hclgevf_sync_from_del_list(struct list_head *del_list, 1474ee4bcd3bSJian Shen struct list_head *mac_list) 1475ee4bcd3bSJian Shen { 1476ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1477ee4bcd3bSJian Shen 1478ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, del_list, node) { 1479ee4bcd3bSJian Shen new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1480ee4bcd3bSJian Shen if (new_node) { 1481ee4bcd3bSJian Shen /* If the mac addr is exist in the mac list, it means 1482ee4bcd3bSJian Shen * received a new request TO_ADD during the time window 1483ee4bcd3bSJian Shen * of sending mac addr configurrequest to PF, so just 1484ee4bcd3bSJian Shen * change the mac state to ACTIVE. 
1485ee4bcd3bSJian Shen */ 1486ee4bcd3bSJian Shen new_node->state = HCLGEVF_MAC_ACTIVE; 1487ee4bcd3bSJian Shen list_del(&mac_node->node); 1488ee4bcd3bSJian Shen kfree(mac_node); 1489ee4bcd3bSJian Shen } else { 1490ee4bcd3bSJian Shen list_del(&mac_node->node); 1491ee4bcd3bSJian Shen list_add_tail(&mac_node->node, mac_list); 1492ee4bcd3bSJian Shen } 1493ee4bcd3bSJian Shen } 1494ee4bcd3bSJian Shen } 1495ee4bcd3bSJian Shen 1496ee4bcd3bSJian Shen static void hclgevf_clear_list(struct list_head *list) 1497ee4bcd3bSJian Shen { 1498ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp; 1499ee4bcd3bSJian Shen 1500ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) { 1501ee4bcd3bSJian Shen list_del(&mac_node->node); 1502ee4bcd3bSJian Shen kfree(mac_node); 1503ee4bcd3bSJian Shen } 1504ee4bcd3bSJian Shen } 1505ee4bcd3bSJian Shen 1506ee4bcd3bSJian Shen static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, 1507ee4bcd3bSJian Shen enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1508ee4bcd3bSJian Shen { 1509ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1510ee4bcd3bSJian Shen struct list_head tmp_add_list, tmp_del_list; 1511ee4bcd3bSJian Shen struct list_head *list; 1512ee4bcd3bSJian Shen 1513ee4bcd3bSJian Shen INIT_LIST_HEAD(&tmp_add_list); 1514ee4bcd3bSJian Shen INIT_LIST_HEAD(&tmp_del_list); 1515ee4bcd3bSJian Shen 1516ee4bcd3bSJian Shen /* move the mac addr to the tmp_add_list and tmp_del_list, then 1517ee4bcd3bSJian Shen * we can add/delete these mac addr outside the spin lock 1518ee4bcd3bSJian Shen */ 1519ee4bcd3bSJian Shen list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 
1520ee4bcd3bSJian Shen &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1521ee4bcd3bSJian Shen 1522ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 1523ee4bcd3bSJian Shen 1524ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) { 1525ee4bcd3bSJian Shen switch (mac_node->state) { 1526ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_DEL: 1527ee4bcd3bSJian Shen list_del(&mac_node->node); 1528ee4bcd3bSJian Shen list_add_tail(&mac_node->node, &tmp_del_list); 1529ee4bcd3bSJian Shen break; 1530ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_ADD: 1531ee4bcd3bSJian Shen new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 1532ee4bcd3bSJian Shen if (!new_node) 1533ee4bcd3bSJian Shen goto stop_traverse; 1534ee4bcd3bSJian Shen 1535ee4bcd3bSJian Shen ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 1536ee4bcd3bSJian Shen new_node->state = mac_node->state; 1537ee4bcd3bSJian Shen list_add_tail(&new_node->node, &tmp_add_list); 1538ee4bcd3bSJian Shen break; 1539ee4bcd3bSJian Shen default: 1540ee4bcd3bSJian Shen break; 1541ee4bcd3bSJian Shen } 1542ee4bcd3bSJian Shen } 1543ee4bcd3bSJian Shen 1544ee4bcd3bSJian Shen stop_traverse: 1545ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1546ee4bcd3bSJian Shen 1547ee4bcd3bSJian Shen /* delete first, in order to get max mac table space for adding */ 1548ee4bcd3bSJian Shen hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); 1549ee4bcd3bSJian Shen hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); 1550ee4bcd3bSJian Shen 1551ee4bcd3bSJian Shen /* if some mac addresses were added/deleted fail, move back to the 1552ee4bcd3bSJian Shen * mac_list, and retry at next time. 
1553ee4bcd3bSJian Shen */ 1554ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 1555ee4bcd3bSJian Shen 1556ee4bcd3bSJian Shen hclgevf_sync_from_del_list(&tmp_del_list, list); 1557ee4bcd3bSJian Shen hclgevf_sync_from_add_list(&tmp_add_list, list); 1558ee4bcd3bSJian Shen 1559ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1560ee4bcd3bSJian Shen } 1561ee4bcd3bSJian Shen 1562ee4bcd3bSJian Shen static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) 1563ee4bcd3bSJian Shen { 1564ee4bcd3bSJian Shen hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); 1565ee4bcd3bSJian Shen hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); 1566ee4bcd3bSJian Shen } 1567ee4bcd3bSJian Shen 1568ee4bcd3bSJian Shen static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) 1569ee4bcd3bSJian Shen { 1570ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 1571ee4bcd3bSJian Shen 1572ee4bcd3bSJian Shen hclgevf_clear_list(&hdev->mac_table.uc_mac_list); 1573ee4bcd3bSJian Shen hclgevf_clear_list(&hdev->mac_table.mc_mac_list); 1574ee4bcd3bSJian Shen 1575ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1576ee4bcd3bSJian Shen } 1577ee4bcd3bSJian Shen 1578e2cb1decSSalil Mehta static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1579e2cb1decSSalil Mehta __be16 proto, u16 vlan_id, 1580e2cb1decSSalil Mehta bool is_kill) 1581e2cb1decSSalil Mehta { 1582d3410018SYufeng Mo #define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 1583d3410018SYufeng Mo #define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 1584d3410018SYufeng Mo #define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 1585d3410018SYufeng Mo 1586e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1587d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1588fe4144d4SJian Shen int ret; 1589e2cb1decSSalil Mehta 1590b37ce587SYufeng Mo if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1591e2cb1decSSalil Mehta return -EINVAL; 1592e2cb1decSSalil Mehta 1593e2cb1decSSalil Mehta if (proto != htons(ETH_P_8021Q)) 
1594e2cb1decSSalil Mehta return -EPROTONOSUPPORT; 1595e2cb1decSSalil Mehta 1596fe4144d4SJian Shen /* When device is resetting, firmware is unable to handle 1597fe4144d4SJian Shen * mailbox. Just record the vlan id, and remove it after 1598fe4144d4SJian Shen * reset finished. 1599fe4144d4SJian Shen */ 1600fe4144d4SJian Shen if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) { 1601fe4144d4SJian Shen set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1602fe4144d4SJian Shen return -EBUSY; 1603fe4144d4SJian Shen } 1604fe4144d4SJian Shen 1605d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1606d3410018SYufeng Mo HCLGE_MBX_VLAN_FILTER); 1607d3410018SYufeng Mo send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; 1608d3410018SYufeng Mo memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, 1609d3410018SYufeng Mo sizeof(vlan_id)); 1610d3410018SYufeng Mo memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, 1611d3410018SYufeng Mo sizeof(proto)); 161246ee7350SGuojia Liao /* when remove hw vlan filter failed, record the vlan id, 1613fe4144d4SJian Shen * and try to remove it from hw later, to be consistence 1614fe4144d4SJian Shen * with stack. 
1615fe4144d4SJian Shen */ 1616d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1617fe4144d4SJian Shen if (is_kill && ret) 1618fe4144d4SJian Shen set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1619fe4144d4SJian Shen 1620fe4144d4SJian Shen return ret; 1621fe4144d4SJian Shen } 1622fe4144d4SJian Shen 1623fe4144d4SJian Shen static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1624fe4144d4SJian Shen { 1625fe4144d4SJian Shen #define HCLGEVF_MAX_SYNC_COUNT 60 1626fe4144d4SJian Shen struct hnae3_handle *handle = &hdev->nic; 1627fe4144d4SJian Shen int ret, sync_cnt = 0; 1628fe4144d4SJian Shen u16 vlan_id; 1629fe4144d4SJian Shen 1630fe4144d4SJian Shen vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1631fe4144d4SJian Shen while (vlan_id != VLAN_N_VID) { 1632fe4144d4SJian Shen ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1633fe4144d4SJian Shen vlan_id, true); 1634fe4144d4SJian Shen if (ret) 1635fe4144d4SJian Shen return; 1636fe4144d4SJian Shen 1637fe4144d4SJian Shen clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1638fe4144d4SJian Shen sync_cnt++; 1639fe4144d4SJian Shen if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1640fe4144d4SJian Shen return; 1641fe4144d4SJian Shen 1642fe4144d4SJian Shen vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1643fe4144d4SJian Shen } 1644e2cb1decSSalil Mehta } 1645e2cb1decSSalil Mehta 1646b2641e2aSYunsheng Lin static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1647b2641e2aSYunsheng Lin { 1648b2641e2aSYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1649d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1650b2641e2aSYunsheng Lin 1651d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1652d3410018SYufeng Mo HCLGE_MBX_VLAN_RX_OFF_CFG); 1653d3410018SYufeng Mo send_msg.data[0] = enable ? 
1 : 0; 1654d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1655b2641e2aSYunsheng Lin } 1656b2641e2aSYunsheng Lin 16577fa6be4fSHuazhong Tan static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1658e2cb1decSSalil Mehta { 1659e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1660d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 16611a426f8bSPeng Li int ret; 1662e2cb1decSSalil Mehta 16631a426f8bSPeng Li /* disable vf queue before send queue reset msg to PF */ 16641a426f8bSPeng Li ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 16651a426f8bSPeng Li if (ret) 16667fa6be4fSHuazhong Tan return ret; 16671a426f8bSPeng Li 1668d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1669d3410018SYufeng Mo memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 1670d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1671e2cb1decSSalil Mehta } 1672e2cb1decSSalil Mehta 1673818f1675SYunsheng Lin static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1674818f1675SYunsheng Lin { 1675818f1675SYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1676d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1677818f1675SYunsheng Lin 1678d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); 1679d3410018SYufeng Mo memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); 1680d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1681818f1675SYunsheng Lin } 1682818f1675SYunsheng Lin 16836988eb2aSSalil Mehta static int hclgevf_notify_client(struct hclgevf_dev *hdev, 16846988eb2aSSalil Mehta enum hnae3_reset_notify_type type) 16856988eb2aSSalil Mehta { 16866988eb2aSSalil Mehta struct hnae3_client *client = hdev->nic_client; 16876988eb2aSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 16886a5f6fa3SHuazhong Tan int ret; 16896988eb2aSSalil Mehta 169025d1817cSHuazhong Tan if 
(!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 169125d1817cSHuazhong Tan !client) 169225d1817cSHuazhong Tan return 0; 169325d1817cSHuazhong Tan 16946988eb2aSSalil Mehta if (!client->ops->reset_notify) 16956988eb2aSSalil Mehta return -EOPNOTSUPP; 16966988eb2aSSalil Mehta 16976a5f6fa3SHuazhong Tan ret = client->ops->reset_notify(handle, type); 16986a5f6fa3SHuazhong Tan if (ret) 16996a5f6fa3SHuazhong Tan dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 17006a5f6fa3SHuazhong Tan type, ret); 17016a5f6fa3SHuazhong Tan 17026a5f6fa3SHuazhong Tan return ret; 17036988eb2aSSalil Mehta } 17046988eb2aSSalil Mehta 17056988eb2aSSalil Mehta static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 17066988eb2aSSalil Mehta { 1707aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_US 20000 1708aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_CNT 2000 1709aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1710aa5c4f17SHuazhong Tan (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1711aa5c4f17SHuazhong Tan 1712aa5c4f17SHuazhong Tan u32 val; 1713aa5c4f17SHuazhong Tan int ret; 17146988eb2aSSalil Mehta 1715f28368bbSHuazhong Tan if (hdev->reset_type == HNAE3_VF_RESET) 171672e2fb07SHuazhong Tan ret = readl_poll_timeout(hdev->hw.io_base + 171772e2fb07SHuazhong Tan HCLGEVF_VF_RST_ING, val, 171872e2fb07SHuazhong Tan !(val & HCLGEVF_VF_RST_ING_BIT), 171972e2fb07SHuazhong Tan HCLGEVF_RESET_WAIT_US, 172072e2fb07SHuazhong Tan HCLGEVF_RESET_WAIT_TIMEOUT_US); 172172e2fb07SHuazhong Tan else 172272e2fb07SHuazhong Tan ret = readl_poll_timeout(hdev->hw.io_base + 172372e2fb07SHuazhong Tan HCLGEVF_RST_ING, val, 1724aa5c4f17SHuazhong Tan !(val & HCLGEVF_RST_ING_BITS), 1725aa5c4f17SHuazhong Tan HCLGEVF_RESET_WAIT_US, 1726aa5c4f17SHuazhong Tan HCLGEVF_RESET_WAIT_TIMEOUT_US); 17276988eb2aSSalil Mehta 17286988eb2aSSalil Mehta /* hardware completion status should be available by this time */ 1729aa5c4f17SHuazhong Tan if (ret) { 1730aa5c4f17SHuazhong Tan dev_err(&hdev->pdev->dev, 
17316988eb2aSSalil Mehta "could'nt get reset done status from h/w, timeout!\n"); 1732aa5c4f17SHuazhong Tan return ret; 17336988eb2aSSalil Mehta } 17346988eb2aSSalil Mehta 17356988eb2aSSalil Mehta /* we will wait a bit more to let reset of the stack to complete. This 17366988eb2aSSalil Mehta * might happen in case reset assertion was made by PF. Yes, this also 17376988eb2aSSalil Mehta * means we might end up waiting bit more even for VF reset. 17386988eb2aSSalil Mehta */ 17396988eb2aSSalil Mehta msleep(5000); 17406988eb2aSSalil Mehta 17416988eb2aSSalil Mehta return 0; 17426988eb2aSSalil Mehta } 17436988eb2aSSalil Mehta 17446b428b4fSHuazhong Tan static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 17456b428b4fSHuazhong Tan { 17466b428b4fSHuazhong Tan u32 reg_val; 17476b428b4fSHuazhong Tan 17486b428b4fSHuazhong Tan reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); 17496b428b4fSHuazhong Tan if (enable) 17506b428b4fSHuazhong Tan reg_val |= HCLGEVF_NIC_SW_RST_RDY; 17516b428b4fSHuazhong Tan else 17526b428b4fSHuazhong Tan reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 17536b428b4fSHuazhong Tan 17546b428b4fSHuazhong Tan hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 17556b428b4fSHuazhong Tan reg_val); 17566b428b4fSHuazhong Tan } 17576b428b4fSHuazhong Tan 17586988eb2aSSalil Mehta static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 17596988eb2aSSalil Mehta { 17607a01c897SSalil Mehta int ret; 17617a01c897SSalil Mehta 17626988eb2aSSalil Mehta /* uninitialize the nic client */ 17636a5f6fa3SHuazhong Tan ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 17646a5f6fa3SHuazhong Tan if (ret) 17656a5f6fa3SHuazhong Tan return ret; 17666988eb2aSSalil Mehta 17677a01c897SSalil Mehta /* re-initialize the hclge device */ 17689c6f7085SHuazhong Tan ret = hclgevf_reset_hdev(hdev); 17697a01c897SSalil Mehta if (ret) { 17707a01c897SSalil Mehta dev_err(&hdev->pdev->dev, 17717a01c897SSalil Mehta "hclge device re-init failed, VF is disabled!\n"); 
17727a01c897SSalil Mehta return ret; 17737a01c897SSalil Mehta } 17746988eb2aSSalil Mehta 17756988eb2aSSalil Mehta /* bring up the nic client again */ 17766a5f6fa3SHuazhong Tan ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 17776a5f6fa3SHuazhong Tan if (ret) 17786a5f6fa3SHuazhong Tan return ret; 17796988eb2aSSalil Mehta 17806b428b4fSHuazhong Tan ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); 17816b428b4fSHuazhong Tan if (ret) 17826b428b4fSHuazhong Tan return ret; 17836b428b4fSHuazhong Tan 17846b428b4fSHuazhong Tan /* clear handshake status with IMP */ 17856b428b4fSHuazhong Tan hclgevf_reset_handshake(hdev, false); 17866b428b4fSHuazhong Tan 17871cc9bc6eSHuazhong Tan /* bring up the nic to enable TX/RX again */ 17881cc9bc6eSHuazhong Tan return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 17896988eb2aSSalil Mehta } 17906988eb2aSSalil Mehta 1791dea846e8SHuazhong Tan static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1792dea846e8SHuazhong Tan { 1793ada13ee3SHuazhong Tan #define HCLGEVF_RESET_SYNC_TIME 100 1794ada13ee3SHuazhong Tan 1795d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1796dea846e8SHuazhong Tan int ret = 0; 1797dea846e8SHuazhong Tan 1798f28368bbSHuazhong Tan if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1799d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); 1800d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1801c88a6e7dSHuazhong Tan hdev->rst_stats.vf_func_rst_cnt++; 1802dea846e8SHuazhong Tan } 1803dea846e8SHuazhong Tan 1804ef5f8e50SHuazhong Tan set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1805ada13ee3SHuazhong Tan /* inform hardware that preparatory work is done */ 1806ada13ee3SHuazhong Tan msleep(HCLGEVF_RESET_SYNC_TIME); 18076b428b4fSHuazhong Tan hclgevf_reset_handshake(hdev, true); 1808dea846e8SHuazhong Tan dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", 1809dea846e8SHuazhong Tan hdev->reset_type, ret); 1810dea846e8SHuazhong Tan 
1811dea846e8SHuazhong Tan return ret; 1812dea846e8SHuazhong Tan } 1813dea846e8SHuazhong Tan 18143d77d0cbSHuazhong Tan static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 18153d77d0cbSHuazhong Tan { 18163d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 18173d77d0cbSHuazhong Tan hdev->rst_stats.vf_func_rst_cnt); 18183d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 18193d77d0cbSHuazhong Tan hdev->rst_stats.flr_rst_cnt); 18203d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 18213d77d0cbSHuazhong Tan hdev->rst_stats.vf_rst_cnt); 18223d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "reset done count: %u\n", 18233d77d0cbSHuazhong Tan hdev->rst_stats.rst_done_cnt); 18243d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 18253d77d0cbSHuazhong Tan hdev->rst_stats.hw_rst_done_cnt); 18263d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "reset count: %u\n", 18273d77d0cbSHuazhong Tan hdev->rst_stats.rst_cnt); 18283d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 18293d77d0cbSHuazhong Tan hdev->rst_stats.rst_fail_cnt); 18303d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 18313d77d0cbSHuazhong Tan hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 18323d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 18333d77d0cbSHuazhong Tan hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG)); 18343d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 18353d77d0cbSHuazhong Tan hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); 18363d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 18373d77d0cbSHuazhong Tan hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 18383d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 18393d77d0cbSHuazhong Tan } 18403d77d0cbSHuazhong Tan 
1841bbe6540eSHuazhong Tan static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1842bbe6540eSHuazhong Tan { 18436b428b4fSHuazhong Tan /* recover handshake status with IMP when reset fail */ 18446b428b4fSHuazhong Tan hclgevf_reset_handshake(hdev, true); 1845bbe6540eSHuazhong Tan hdev->rst_stats.rst_fail_cnt++; 1846adcf738bSGuojia Liao dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1847bbe6540eSHuazhong Tan hdev->rst_stats.rst_fail_cnt); 1848bbe6540eSHuazhong Tan 1849bbe6540eSHuazhong Tan if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1850bbe6540eSHuazhong Tan set_bit(hdev->reset_type, &hdev->reset_pending); 1851bbe6540eSHuazhong Tan 1852bbe6540eSHuazhong Tan if (hclgevf_is_reset_pending(hdev)) { 1853bbe6540eSHuazhong Tan set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1854bbe6540eSHuazhong Tan hclgevf_reset_task_schedule(hdev); 18553d77d0cbSHuazhong Tan } else { 1856d5432455SGuojia Liao set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 18573d77d0cbSHuazhong Tan hclgevf_dump_rst_info(hdev); 1858bbe6540eSHuazhong Tan } 1859bbe6540eSHuazhong Tan } 1860bbe6540eSHuazhong Tan 18611cc9bc6eSHuazhong Tan static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 18626988eb2aSSalil Mehta { 1863dea846e8SHuazhong Tan struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 18646988eb2aSSalil Mehta int ret; 18656988eb2aSSalil Mehta 1866dea846e8SHuazhong Tan /* Initialize ae_dev reset status as well, in case enet layer wants to 1867dea846e8SHuazhong Tan * know if device is undergoing reset 1868dea846e8SHuazhong Tan */ 1869dea846e8SHuazhong Tan ae_dev->reset_type = hdev->reset_type; 1870c88a6e7dSHuazhong Tan hdev->rst_stats.rst_cnt++; 18716988eb2aSSalil Mehta 18721cc9bc6eSHuazhong Tan rtnl_lock(); 18736988eb2aSSalil Mehta /* bring down the nic to stop any ongoing TX/RX */ 18746a5f6fa3SHuazhong Tan ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 187529118ab9SHuazhong Tan rtnl_unlock(); 18766a5f6fa3SHuazhong Tan if (ret) 18771cc9bc6eSHuazhong 
Tan return ret; 1878dea846e8SHuazhong Tan 18791cc9bc6eSHuazhong Tan return hclgevf_reset_prepare_wait(hdev); 18806988eb2aSSalil Mehta } 18816988eb2aSSalil Mehta 18821cc9bc6eSHuazhong Tan static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 18831cc9bc6eSHuazhong Tan { 18841cc9bc6eSHuazhong Tan struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 18851cc9bc6eSHuazhong Tan int ret; 18861cc9bc6eSHuazhong Tan 1887c88a6e7dSHuazhong Tan hdev->rst_stats.hw_rst_done_cnt++; 1888c88a6e7dSHuazhong Tan 188929118ab9SHuazhong Tan rtnl_lock(); 18906988eb2aSSalil Mehta /* now, re-initialize the nic client and ae device */ 18916988eb2aSSalil Mehta ret = hclgevf_reset_stack(hdev); 18921cc9bc6eSHuazhong Tan rtnl_unlock(); 18936a5f6fa3SHuazhong Tan if (ret) { 18946988eb2aSSalil Mehta dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 18951cc9bc6eSHuazhong Tan return ret; 18966a5f6fa3SHuazhong Tan } 18976988eb2aSSalil Mehta 1898b644a8d4SHuazhong Tan hdev->last_reset_time = jiffies; 1899b644a8d4SHuazhong Tan ae_dev->reset_type = HNAE3_NONE_RESET; 1900c88a6e7dSHuazhong Tan hdev->rst_stats.rst_done_cnt++; 1901bbe6540eSHuazhong Tan hdev->rst_stats.rst_fail_cnt = 0; 1902d5432455SGuojia Liao clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1903b644a8d4SHuazhong Tan 19041cc9bc6eSHuazhong Tan return 0; 19051cc9bc6eSHuazhong Tan } 19061cc9bc6eSHuazhong Tan 19071cc9bc6eSHuazhong Tan static void hclgevf_reset(struct hclgevf_dev *hdev) 19081cc9bc6eSHuazhong Tan { 19091cc9bc6eSHuazhong Tan if (hclgevf_reset_prepare(hdev)) 19101cc9bc6eSHuazhong Tan goto err_reset; 19111cc9bc6eSHuazhong Tan 19121cc9bc6eSHuazhong Tan /* check if VF could successfully fetch the hardware reset completion 19131cc9bc6eSHuazhong Tan * status from the hardware 19141cc9bc6eSHuazhong Tan */ 19151cc9bc6eSHuazhong Tan if (hclgevf_reset_wait(hdev)) { 19161cc9bc6eSHuazhong Tan /* can't do much in this situation, will disable VF */ 19171cc9bc6eSHuazhong Tan dev_err(&hdev->pdev->dev, 19181cc9bc6eSHuazhong Tan 
"failed to fetch H/W reset completion status\n"); 19191cc9bc6eSHuazhong Tan goto err_reset; 19201cc9bc6eSHuazhong Tan } 19211cc9bc6eSHuazhong Tan 19221cc9bc6eSHuazhong Tan if (hclgevf_reset_rebuild(hdev)) 19231cc9bc6eSHuazhong Tan goto err_reset; 19241cc9bc6eSHuazhong Tan 19251cc9bc6eSHuazhong Tan return; 19261cc9bc6eSHuazhong Tan 19276a5f6fa3SHuazhong Tan err_reset: 1928bbe6540eSHuazhong Tan hclgevf_reset_err_handle(hdev); 19296988eb2aSSalil Mehta } 19306988eb2aSSalil Mehta 1931720bd583SHuazhong Tan static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1932720bd583SHuazhong Tan unsigned long *addr) 1933720bd583SHuazhong Tan { 1934720bd583SHuazhong Tan enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1935720bd583SHuazhong Tan 1936dea846e8SHuazhong Tan /* return the highest priority reset level amongst all */ 1937b90fcc5bSHuazhong Tan if (test_bit(HNAE3_VF_RESET, addr)) { 1938b90fcc5bSHuazhong Tan rst_level = HNAE3_VF_RESET; 1939b90fcc5bSHuazhong Tan clear_bit(HNAE3_VF_RESET, addr); 1940b90fcc5bSHuazhong Tan clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1941b90fcc5bSHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 1942b90fcc5bSHuazhong Tan } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1943dea846e8SHuazhong Tan rst_level = HNAE3_VF_FULL_RESET; 1944dea846e8SHuazhong Tan clear_bit(HNAE3_VF_FULL_RESET, addr); 1945dea846e8SHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 1946aa5c4f17SHuazhong Tan } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1947aa5c4f17SHuazhong Tan rst_level = HNAE3_VF_PF_FUNC_RESET; 1948aa5c4f17SHuazhong Tan clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1949aa5c4f17SHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 1950dea846e8SHuazhong Tan } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1951dea846e8SHuazhong Tan rst_level = HNAE3_VF_FUNC_RESET; 1952dea846e8SHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 19536ff3cf07SHuazhong Tan } else if (test_bit(HNAE3_FLR_RESET, addr)) { 19546ff3cf07SHuazhong Tan rst_level = 
HNAE3_FLR_RESET; 19556ff3cf07SHuazhong Tan clear_bit(HNAE3_FLR_RESET, addr); 1956720bd583SHuazhong Tan } 1957720bd583SHuazhong Tan 1958720bd583SHuazhong Tan return rst_level; 1959720bd583SHuazhong Tan } 1960720bd583SHuazhong Tan 19616ae4e733SShiju Jose static void hclgevf_reset_event(struct pci_dev *pdev, 19626ae4e733SShiju Jose struct hnae3_handle *handle) 19636d4c3981SSalil Mehta { 19646ff3cf07SHuazhong Tan struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 19656ff3cf07SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 19666d4c3981SSalil Mehta 19676d4c3981SSalil Mehta dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 19686d4c3981SSalil Mehta 19696ff3cf07SHuazhong Tan if (hdev->default_reset_request) 19700742ed7cSHuazhong Tan hdev->reset_level = 1971720bd583SHuazhong Tan hclgevf_get_reset_level(hdev, 1972720bd583SHuazhong Tan &hdev->default_reset_request); 1973720bd583SHuazhong Tan else 1974dea846e8SHuazhong Tan hdev->reset_level = HNAE3_VF_FUNC_RESET; 19756d4c3981SSalil Mehta 1976436667d2SSalil Mehta /* reset of this VF requested */ 1977436667d2SSalil Mehta set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1978436667d2SSalil Mehta hclgevf_reset_task_schedule(hdev); 19796d4c3981SSalil Mehta 19800742ed7cSHuazhong Tan hdev->last_reset_time = jiffies; 19816d4c3981SSalil Mehta } 19826d4c3981SSalil Mehta 1983720bd583SHuazhong Tan static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1984720bd583SHuazhong Tan enum hnae3_reset_type rst_type) 1985720bd583SHuazhong Tan { 1986720bd583SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 1987720bd583SHuazhong Tan 1988720bd583SHuazhong Tan set_bit(rst_type, &hdev->default_reset_request); 1989720bd583SHuazhong Tan } 1990720bd583SHuazhong Tan 1991f28368bbSHuazhong Tan static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1992f28368bbSHuazhong Tan { 1993f28368bbSHuazhong Tan writel(en ? 
1 : 0, vector->addr); 1994f28368bbSHuazhong Tan } 1995f28368bbSHuazhong Tan 19966ff3cf07SHuazhong Tan static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 19976ff3cf07SHuazhong Tan { 1998f28368bbSHuazhong Tan #define HCLGEVF_FLR_RETRY_WAIT_MS 500 1999f28368bbSHuazhong Tan #define HCLGEVF_FLR_RETRY_CNT 5 2000f28368bbSHuazhong Tan 20016ff3cf07SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 2002f28368bbSHuazhong Tan int retry_cnt = 0; 2003f28368bbSHuazhong Tan int ret; 20046ff3cf07SHuazhong Tan 2005f28368bbSHuazhong Tan retry: 2006f28368bbSHuazhong Tan down(&hdev->reset_sem); 2007f28368bbSHuazhong Tan set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2008f28368bbSHuazhong Tan hdev->reset_type = HNAE3_FLR_RESET; 2009f28368bbSHuazhong Tan ret = hclgevf_reset_prepare(hdev); 2010f28368bbSHuazhong Tan if (ret) { 2011f28368bbSHuazhong Tan dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", 2012f28368bbSHuazhong Tan ret); 2013f28368bbSHuazhong Tan if (hdev->reset_pending || 2014f28368bbSHuazhong Tan retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) { 20156ff3cf07SHuazhong Tan dev_err(&hdev->pdev->dev, 2016f28368bbSHuazhong Tan "reset_pending:0x%lx, retry_cnt:%d\n", 2017f28368bbSHuazhong Tan hdev->reset_pending, retry_cnt); 2018f28368bbSHuazhong Tan clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2019f28368bbSHuazhong Tan up(&hdev->reset_sem); 2020f28368bbSHuazhong Tan msleep(HCLGEVF_FLR_RETRY_WAIT_MS); 2021f28368bbSHuazhong Tan goto retry; 2022f28368bbSHuazhong Tan } 2023f28368bbSHuazhong Tan } 2024f28368bbSHuazhong Tan 2025f28368bbSHuazhong Tan /* disable misc vector before FLR done */ 2026f28368bbSHuazhong Tan hclgevf_enable_vector(&hdev->misc_vector, false); 2027f28368bbSHuazhong Tan hdev->rst_stats.flr_rst_cnt++; 2028f28368bbSHuazhong Tan } 2029f28368bbSHuazhong Tan 2030f28368bbSHuazhong Tan static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 2031f28368bbSHuazhong Tan { 2032f28368bbSHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 
2033f28368bbSHuazhong Tan int ret;
2034f28368bbSHuazhong Tan 
2035f28368bbSHuazhong Tan hclgevf_enable_vector(&hdev->misc_vector, true);
2036f28368bbSHuazhong Tan 
2037f28368bbSHuazhong Tan ret = hclgevf_reset_rebuild(hdev);
2038f28368bbSHuazhong Tan if (ret)
2039f28368bbSHuazhong Tan dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
2040f28368bbSHuazhong Tan ret);
2041f28368bbSHuazhong Tan 
2042f28368bbSHuazhong Tan hdev->reset_type = HNAE3_NONE_RESET;
2043f28368bbSHuazhong Tan clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2044f28368bbSHuazhong Tan up(&hdev->reset_sem);
20456ff3cf07SHuazhong Tan }
20466ff3cf07SHuazhong Tan 
/* hclgevf_get_fw_version - return the firmware version cached in the VF
 * device structure.
 */
2047e2cb1decSSalil Mehta static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
2048e2cb1decSSalil Mehta {
2049e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2050e2cb1decSSalil Mehta 
2051e2cb1decSSalil Mehta return hdev->fw_version;
2052e2cb1decSSalil Mehta }
2053e2cb1decSSalil Mehta 
/* hclgevf_get_misc_vector - reserve MSI vector 0 for misc (mailbox/reset)
 * interrupts: record its IRQ number and mask-register address, and account
 * for it in the num_msi_left/num_msi_used bookkeeping.
 */
2054e2cb1decSSalil Mehta static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
2055e2cb1decSSalil Mehta {
2056e2cb1decSSalil Mehta struct hclgevf_misc_vector *vector = &hdev->misc_vector;
2057e2cb1decSSalil Mehta 
2058e2cb1decSSalil Mehta vector->vector_irq = pci_irq_vector(hdev->pdev,
2059e2cb1decSSalil Mehta HCLGEVF_MISC_VECTOR_NUM);
2060e2cb1decSSalil Mehta vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
2061e2cb1decSSalil Mehta /* vector status always valid for Vector 0 */
2062e2cb1decSSalil Mehta hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
2063e2cb1decSSalil Mehta hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
2064e2cb1decSSalil Mehta 
2065e2cb1decSSalil Mehta hdev->num_msi_left -= 1;
2066e2cb1decSSalil Mehta hdev->num_msi_used += 1;
2067e2cb1decSSalil Mehta }
2068e2cb1decSSalil Mehta 
/* hclgevf_reset_task_schedule - queue the shared service task for reset
 * handling, unless the device is being removed or the reset-service flag is
 * already set.
 */
206935a1e503SSalil Mehta void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
207035a1e503SSalil Mehta {
2071ff200099SYunsheng Lin if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)
&&
2072ff200099SYunsheng Lin !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
2073ff200099SYunsheng Lin &hdev->state))
20740ea68902SYunsheng Lin mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
207535a1e503SSalil Mehta }
207635a1e503SSalil Mehta 
/* hclgevf_mbx_task_schedule - queue the shared service task immediately for
 * mailbox handling, unless removing or already scheduled for mailbox work.
 */
207707a0556aSSalil Mehta void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
2078e2cb1decSSalil Mehta {
2079ff200099SYunsheng Lin if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2080ff200099SYunsheng Lin !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
2081ff200099SYunsheng Lin &hdev->state))
20820ea68902SYunsheng Lin mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
208307a0556aSSalil Mehta }
2084e2cb1decSSalil Mehta 
/* hclgevf_task_schedule - (re)queue the periodic service task after @delay
 * jiffies, skipped while the device is removing or a reset has failed.
 */
2085ff200099SYunsheng Lin static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
2086ff200099SYunsheng Lin unsigned long delay)
2087e2cb1decSSalil Mehta {
2088d5432455SGuojia Liao if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2089d5432455SGuojia Liao !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
20900ea68902SYunsheng Lin mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
2091e2cb1decSSalil Mehta }
2092e2cb1decSSalil Mehta 
/* hclgevf_reset_service_task - service-task leg that performs pending or
 * requested resets under the reset semaphore. Handles two cases: a PF
 * initiated reset already pending (loop through all pending levels), or a
 * reset requested locally (escalate to full reset after
 * HCLGEVF_MAX_RESET_ATTEMPTS_CNT attempts).
 */
2093ff200099SYunsheng Lin static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
209435a1e503SSalil Mehta {
2095d6ad7c53SGuojia Liao #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
2096d6ad7c53SGuojia Liao 
2097ff200099SYunsheng Lin if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
2098ff200099SYunsheng Lin return;
2099ff200099SYunsheng Lin 
2100f28368bbSHuazhong Tan down(&hdev->reset_sem);
2101f28368bbSHuazhong Tan set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
210235a1e503SSalil Mehta 
2103436667d2SSalil Mehta if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
2104436667d2SSalil Mehta &hdev->reset_state)) {
2105436667d2SSalil Mehta /* PF has initmated that it is about to reset the hardware.
21069b2f3477SWeihang Li * We now have to poll & check if hardware has actually
21079b2f3477SWeihang Li * completed the reset sequence. On hardware reset completion,
21089b2f3477SWeihang Li * VF needs to reset the client and ae device.
210935a1e503SSalil Mehta */
2110436667d2SSalil Mehta hdev->reset_attempts = 0;
2111436667d2SSalil Mehta 
2112dea846e8SHuazhong Tan hdev->last_reset_time = jiffies;
/* Drain every pending reset level, highest priority first, until
 * hclgevf_get_reset_level() reports none left.
 */
2113dea846e8SHuazhong Tan while ((hdev->reset_type =
2114dea846e8SHuazhong Tan hclgevf_get_reset_level(hdev, &hdev->reset_pending))
21151cc9bc6eSHuazhong Tan != HNAE3_NONE_RESET)
21161cc9bc6eSHuazhong Tan hclgevf_reset(hdev);
2117436667d2SSalil Mehta } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
2118436667d2SSalil Mehta &hdev->reset_state)) {
2119436667d2SSalil Mehta /* we could be here when either of below happens:
21209b2f3477SWeihang Li * 1. reset was initiated due to watchdog timeout caused by
2121436667d2SSalil Mehta * a. IMP was earlier reset and our TX got choked down and
2122436667d2SSalil Mehta * which resulted in watchdog reacting and inducing VF
2123436667d2SSalil Mehta * reset. This also means our cmdq would be unreliable.
2124436667d2SSalil Mehta * b. problem in TX due to other lower layer(example link
2125436667d2SSalil Mehta * layer not functioning properly etc.)
2126436667d2SSalil Mehta * 2. VF reset might have been initiated due to some config
2127436667d2SSalil Mehta * change.
2128436667d2SSalil Mehta *
2129436667d2SSalil Mehta * NOTE: Theres no clear way to detect above cases than to react
2130436667d2SSalil Mehta * to the response of PF for this reset request. PF will ack the
2131436667d2SSalil Mehta * 1b and 2. cases but we will not get any intimation about 1a
2132436667d2SSalil Mehta * from PF as cmdq would be in unreliable state i.e. mailbox
2133436667d2SSalil Mehta * communication between PF and VF would be broken.
213446ee7350SGuojia Liao *
213546ee7350SGuojia Liao * if we are never geting into pending state it means either:
2136436667d2SSalil Mehta * 1. PF is not receiving our request which could be due to IMP
2137436667d2SSalil Mehta * reset
2138436667d2SSalil Mehta * 2. PF is screwed
2139436667d2SSalil Mehta * We cannot do much for 2. but to check first we can try reset
2140436667d2SSalil Mehta * our PCIe + stack and see if it alleviates the problem.
2141436667d2SSalil Mehta */
2142d6ad7c53SGuojia Liao if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
2143436667d2SSalil Mehta /* prepare for full reset of stack + pcie interface */
2144dea846e8SHuazhong Tan set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
2145436667d2SSalil Mehta 
2146436667d2SSalil Mehta /* "defer" schedule the reset task again */
2147436667d2SSalil Mehta set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2148436667d2SSalil Mehta } else {
2149436667d2SSalil Mehta hdev->reset_attempts++;
2150436667d2SSalil Mehta 
2151dea846e8SHuazhong Tan set_bit(hdev->reset_level, &hdev->reset_pending);
2152dea846e8SHuazhong Tan set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2153436667d2SSalil Mehta }
2154dea846e8SHuazhong Tan hclgevf_reset_task_schedule(hdev);
2155436667d2SSalil Mehta }
215635a1e503SSalil Mehta 
2157afb6afdbSHuazhong Tan hdev->reset_type = HNAE3_NONE_RESET;
215835a1e503SSalil Mehta clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2159f28368bbSHuazhong Tan up(&hdev->reset_sem);
216035a1e503SSalil Mehta }
216135a1e503SSalil Mehta 
/* hclgevf_mailbox_service_task - service-task leg that drains asynchronous
 * mailbox messages from the PF. HCLGEVF_STATE_MBX_HANDLING guards against
 * concurrent handling; it is cleared when the drain completes.
 */
2162ff200099SYunsheng Lin static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
2163e2cb1decSSalil Mehta {
2164ff200099SYunsheng Lin if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
2165ff200099SYunsheng Lin return;
2166e2cb1decSSalil Mehta 
2167e2cb1decSSalil Mehta if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
2168e2cb1decSSalil Mehta return;
2169e2cb1decSSalil Mehta 
217007a0556aSSalil Mehta 
hclgevf_mbx_async_handler(hdev);
2171e2cb1decSSalil Mehta 
2172e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2173e2cb1decSSalil Mehta }
2174e2cb1decSSalil Mehta 
/* hclgevf_keep_alive - send a best-effort (no response expected) keep-alive
 * mailbox message to the PF; skipped while the command queue is disabled.
 */
2175ff200099SYunsheng Lin static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
2176a6d818e3SYunsheng Lin {
2177d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg;
2178a6d818e3SYunsheng Lin int ret;
2179a6d818e3SYunsheng Lin 
21801416d333SHuazhong Tan if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
2181c59a85c0SJian Shen return;
2182c59a85c0SJian Shen 
2183d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
2184d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2185a6d818e3SYunsheng Lin if (ret)
2186a6d818e3SYunsheng Lin dev_err(&hdev->pdev->dev,
2187a6d818e3SYunsheng Lin "VF sends keep alive cmd failed(=%d)\n", ret);
2188a6d818e3SYunsheng Lin }
2189a6d818e3SYunsheng Lin 
/* hclgevf_periodic_service_task - roughly once-per-second housekeeping:
 * keep-alive, stats update, link status/mode refresh, and VLAN/MAC/promisc
 * table synchronization. Rate-limits itself by computing the remaining delay
 * when invoked less than HZ after the previous run, and always reschedules
 * itself via hclgevf_task_schedule().
 */
2190ff200099SYunsheng Lin static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
2191e2cb1decSSalil Mehta {
2192ff200099SYunsheng Lin unsigned long delta = round_jiffies_relative(HZ);
2193ff200099SYunsheng Lin struct hnae3_handle *handle = &hdev->nic;
2194e2cb1decSSalil Mehta 
2195ff200099SYunsheng Lin if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
2196ff200099SYunsheng Lin delta = jiffies - hdev->last_serv_processed;
2197db01afebSliuzhongzhu 
2198ff200099SYunsheng Lin if (delta < round_jiffies_relative(HZ)) {
2199ff200099SYunsheng Lin delta = round_jiffies_relative(HZ) - delta;
2200ff200099SYunsheng Lin goto out;
2201db01afebSliuzhongzhu }
2202ff200099SYunsheng Lin }
2203ff200099SYunsheng Lin 
2204ff200099SYunsheng Lin hdev->serv_processed_cnt++;
2205ff200099SYunsheng Lin if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
2206ff200099SYunsheng Lin hclgevf_keep_alive(hdev);
2207ff200099SYunsheng Lin 
2208ff200099SYunsheng Lin if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
2209ff200099SYunsheng Lin 
hdev->last_serv_processed = jiffies;
2210ff200099SYunsheng Lin goto out;
2211ff200099SYunsheng Lin }
2212ff200099SYunsheng Lin 
2213ff200099SYunsheng Lin if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
2214ff200099SYunsheng Lin hclgevf_tqps_update_stats(handle);
2215e2cb1decSSalil Mehta 
2216e2cb1decSSalil Mehta /* request the link status from the PF. PF would be able to tell VF
2217e2cb1decSSalil Mehta * about such updates in future so we might remove this later
2218e2cb1decSSalil Mehta */
2219e2cb1decSSalil Mehta hclgevf_request_link_info(hdev);
2220e2cb1decSSalil Mehta 
22219194d18bSliuzhongzhu hclgevf_update_link_mode(hdev);
22229194d18bSliuzhongzhu 
2223fe4144d4SJian Shen hclgevf_sync_vlan_filter(hdev);
2224fe4144d4SJian Shen 
2225ee4bcd3bSJian Shen hclgevf_sync_mac_table(hdev);
2226ee4bcd3bSJian Shen 
2227c631c696SJian Shen hclgevf_sync_promisc_mode(hdev);
2228c631c696SJian Shen 
2229ff200099SYunsheng Lin hdev->last_serv_processed = jiffies;
2230436667d2SSalil Mehta 
2231ff200099SYunsheng Lin out:
2232ff200099SYunsheng Lin hclgevf_task_schedule(hdev, delta);
2233ff200099SYunsheng Lin }
2234b3c3fe8eSYunsheng Lin 
/* hclgevf_service_task - single delayed-work entry point that multiplexes
 * the reset, mailbox and periodic service legs; reset and mailbox are run a
 * second time in case the periodic leg delayed them.
 */
2235ff200099SYunsheng Lin static void hclgevf_service_task(struct work_struct *work)
2236ff200099SYunsheng Lin {
2237ff200099SYunsheng Lin struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
2238ff200099SYunsheng Lin service_task.work);
2239ff200099SYunsheng Lin 
2240ff200099SYunsheng Lin hclgevf_reset_service_task(hdev);
2241ff200099SYunsheng Lin hclgevf_mailbox_service_task(hdev);
2242ff200099SYunsheng Lin hclgevf_periodic_service_task(hdev);
2243ff200099SYunsheng Lin 
2244ff200099SYunsheng Lin /* Handle reset and mbx again in case periodical task delays the
2245ff200099SYunsheng Lin * handling by calling hclgevf_task_schedule() in
2246ff200099SYunsheng Lin * hclgevf_periodic_service_task()
2247ff200099SYunsheng Lin */
2248ff200099SYunsheng Lin hclgevf_reset_service_task(hdev);
2249ff200099SYunsheng Lin 
hclgevf_mailbox_service_task(hdev);
2250e2cb1decSSalil Mehta }
2251e2cb1decSSalil Mehta 
/* hclgevf_clear_event_cause - acknowledge vector-0 interrupt sources by
 * writing @regclr to the CMDQ source register.
 */
2252e2cb1decSSalil Mehta static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
2253e2cb1decSSalil Mehta {
2254e2cb1decSSalil Mehta hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
2255e2cb1decSSalil Mehta }
2256e2cb1decSSalil Mehta 
/* hclgevf_check_evt_cause - classify a vector-0 interrupt (reset, mailbox,
 * or other) from the CMDQ status register and compute the value to write
 * back to acknowledge it in *@clearval.
 */
2257b90fcc5bSHuazhong Tan static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
2258b90fcc5bSHuazhong Tan u32 *clearval)
2259e2cb1decSSalil Mehta {
226013050921SHuazhong Tan u32 val, cmdq_stat_reg, rst_ing_reg;
2261e2cb1decSSalil Mehta 
2262e2cb1decSSalil Mehta /* fetch the events from their corresponding regs */
226313050921SHuazhong Tan cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
226413050921SHuazhong Tan HCLGEVF_VECTOR0_CMDQ_STAT_REG);
2265e2cb1decSSalil Mehta 
226613050921SHuazhong Tan if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
2267b90fcc5bSHuazhong Tan rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
2268b90fcc5bSHuazhong Tan dev_info(&hdev->pdev->dev,
2269b90fcc5bSHuazhong Tan "receive reset interrupt 0x%x!\n", rst_ing_reg);
2270b90fcc5bSHuazhong Tan set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
2271b90fcc5bSHuazhong Tan set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2272ef5f8e50SHuazhong Tan set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
227313050921SHuazhong Tan *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
2274c88a6e7dSHuazhong Tan hdev->rst_stats.vf_rst_cnt++;
227572e2fb07SHuazhong Tan /* set up VF hardware reset status, its PF will clear
227672e2fb07SHuazhong Tan * this status when PF has initialized done.
227772e2fb07SHuazhong Tan */
227872e2fb07SHuazhong Tan val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
227972e2fb07SHuazhong Tan hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
228072e2fb07SHuazhong Tan val | HCLGEVF_VF_RST_ING_BIT);
2281b90fcc5bSHuazhong Tan return HCLGEVF_VECTOR0_EVENT_RST;
2282b90fcc5bSHuazhong Tan }
2283b90fcc5bSHuazhong Tan 
2284e2cb1decSSalil Mehta /* check for vector0 mailbox(=CMDQ RX) event source */
228513050921SHuazhong Tan if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
228613050921SHuazhong Tan /* for revision 0x21, clearing interrupt is writing bit 0
228713050921SHuazhong Tan * to the clear register, writing bit 1 means to keep the
228813050921SHuazhong Tan * old value.
228913050921SHuazhong Tan * for revision 0x20, the clear register is a read & write
229013050921SHuazhong Tan * register, so we should just write 0 to the bit we are
229113050921SHuazhong Tan * handling, and keep other bits as cmdq_stat_reg.
229213050921SHuazhong Tan */
229313050921SHuazhong Tan if (hdev->pdev->revision >= 0x21)
229413050921SHuazhong Tan *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
229513050921SHuazhong Tan else
229613050921SHuazhong Tan *clearval = cmdq_stat_reg &
229713050921SHuazhong Tan ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
229813050921SHuazhong Tan 
2299b90fcc5bSHuazhong Tan return HCLGEVF_VECTOR0_EVENT_MBX;
2300e2cb1decSSalil Mehta }
2301e2cb1decSSalil Mehta 
2302e45afb39SHuazhong Tan /* print other vector0 event source */
2303e45afb39SHuazhong Tan dev_info(&hdev->pdev->dev,
2304e45afb39SHuazhong Tan "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
2305e45afb39SHuazhong Tan cmdq_stat_reg);
2306e2cb1decSSalil Mehta 
2307b90fcc5bSHuazhong Tan return HCLGEVF_VECTOR0_EVENT_OTHER;
2308e2cb1decSSalil Mehta }
2309e2cb1decSSalil Mehta 
/* hclgevf_misc_irq_handle - IRQ handler for the misc (vector 0) interrupt:
 * masks the vector, dispatches on the event cause (reset -> schedule reset
 * task, mailbox -> synchronous mbx handler), then acks and unmasks unless
 * the cause was unknown.
 */
2310e2cb1decSSalil Mehta static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
2311e2cb1decSSalil Mehta {
2312b90fcc5bSHuazhong Tan enum hclgevf_evt_cause event_cause;
2313e2cb1decSSalil Mehta struct hclgevf_dev *hdev = data;
2314e2cb1decSSalil Mehta u32 clearval;
2315e2cb1decSSalil Mehta 
2316e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, false);
2317b90fcc5bSHuazhong Tan event_cause = hclgevf_check_evt_cause(hdev, &clearval);
2318e2cb1decSSalil Mehta 
2319b90fcc5bSHuazhong Tan switch (event_cause) {
2320b90fcc5bSHuazhong Tan case HCLGEVF_VECTOR0_EVENT_RST:
2321b90fcc5bSHuazhong Tan hclgevf_reset_task_schedule(hdev);
2322b90fcc5bSHuazhong Tan break;
2323b90fcc5bSHuazhong Tan case HCLGEVF_VECTOR0_EVENT_MBX:
232407a0556aSSalil Mehta hclgevf_mbx_handler(hdev);
2325b90fcc5bSHuazhong Tan break;
2326b90fcc5bSHuazhong Tan default:
2327b90fcc5bSHuazhong Tan break;
2328b90fcc5bSHuazhong Tan }
2329e2cb1decSSalil Mehta 
/* Only re-enable the vector for recognized events; for an unknown
 * source nothing is cleared and the vector stays masked.
 */
2330b90fcc5bSHuazhong Tan if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
2331e2cb1decSSalil Mehta hclgevf_clear_event_cause(hdev, clearval);
2332e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, true);
2333b90fcc5bSHuazhong Tan }
2334e2cb1decSSalil Mehta 
2335e2cb1decSSalil Mehta return IRQ_HANDLED;
2336e2cb1decSSalil Mehta }
2337e2cb1decSSalil Mehta 
/* hclgevf_configure - fetch the VF's configuration from the PF over the
 * mailbox: port-based VLAN state, queue count/depth, media type and TC info.
 * Returns 0 or the first mailbox error.
 */
2338e2cb1decSSalil Mehta static int hclgevf_configure(struct hclgevf_dev *hdev)
2339e2cb1decSSalil Mehta {
2340e2cb1decSSalil Mehta int ret;
2341e2cb1decSSalil Mehta 
234292f11ea1SJian Shen /* get current port based vlan state from PF */
234392f11ea1SJian Shen ret = hclgevf_get_port_base_vlan_filter_state(hdev);
234492f11ea1SJian Shen if (ret)
234592f11ea1SJian Shen return ret;
234692f11ea1SJian Shen 
2347e2cb1decSSalil Mehta /* get queue configuration from PF */
23486cee6fc3SJian Shen ret = hclgevf_get_queue_info(hdev);
2349e2cb1decSSalil Mehta if (ret)
2350e2cb1decSSalil Mehta return ret;
2351c0425944SPeng Li 
2352c0425944SPeng Li /* get queue depth info from PF */
2353c0425944SPeng Li ret = hclgevf_get_queue_depth(hdev);
2354c0425944SPeng Li if (ret)
2355c0425944SPeng Li return ret;
2356c0425944SPeng Li 
23579c3e7130Sliuzhongzhu ret =
hclgevf_get_pf_media_type(hdev);
23589c3e7130Sliuzhongzhu if (ret)
23599c3e7130Sliuzhongzhu return ret;
23609c3e7130Sliuzhongzhu 
2361e2cb1decSSalil Mehta /* get tc configuration from PF */
2362e2cb1decSSalil Mehta return hclgevf_get_tc_info(hdev);
2363e2cb1decSSalil Mehta }
2364e2cb1decSSalil Mehta 
/* hclgevf_alloc_hdev - allocate the per-device hclgevf_dev structure
 * (devm-managed) and cross-link it with the ae_dev.
 */
23657a01c897SSalil Mehta static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
23667a01c897SSalil Mehta {
23677a01c897SSalil Mehta struct pci_dev *pdev = ae_dev->pdev;
23681154bb26SPeng Li struct hclgevf_dev *hdev;
23697a01c897SSalil Mehta 
23707a01c897SSalil Mehta hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
23717a01c897SSalil Mehta if (!hdev)
23727a01c897SSalil Mehta return -ENOMEM;
23737a01c897SSalil Mehta 
23747a01c897SSalil Mehta hdev->pdev = pdev;
23757a01c897SSalil Mehta hdev->ae_dev = ae_dev;
23767a01c897SSalil Mehta ae_dev->priv = hdev;
23777a01c897SSalil Mehta 
23787a01c897SSalil Mehta return 0;
23797a01c897SSalil Mehta }
23807a01c897SSalil Mehta 
/* hclgevf_init_roce_base_info - populate the RoCE handle's vector and I/O
 * info from the NIC side; fails with -EINVAL when not enough MSI vectors
 * remain for RoCE.
 */
2381e2cb1decSSalil Mehta static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
2382e2cb1decSSalil Mehta {
2383e2cb1decSSalil Mehta struct hnae3_handle *roce = &hdev->roce;
2384e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic;
2385e2cb1decSSalil Mehta 
238607acf909SJian Shen roce->rinfo.num_vectors = hdev->num_roce_msix;
2387e2cb1decSSalil Mehta 
2388e2cb1decSSalil Mehta if (hdev->num_msi_left < roce->rinfo.num_vectors ||
2389e2cb1decSSalil Mehta hdev->num_msi_left == 0)
2390e2cb1decSSalil Mehta return -EINVAL;
2391e2cb1decSSalil Mehta 
239207acf909SJian Shen roce->rinfo.base_vector = hdev->roce_base_vector;
2393e2cb1decSSalil Mehta 
2394e2cb1decSSalil Mehta roce->rinfo.netdev = nic->kinfo.netdev;
2395e2cb1decSSalil Mehta roce->rinfo.roce_io_base = hdev->hw.io_base;
2396e2cb1decSSalil Mehta 
2397e2cb1decSSalil Mehta roce->pdev = nic->pdev;
2398e2cb1decSSalil Mehta roce->ae_algo = nic->ae_algo;
2399e2cb1decSSalil Mehta roce->numa_node_mask = nic->numa_node_mask;
2400e2cb1decSSalil Mehta 
2401e2cb1decSSalil Mehta return 0;
2402e2cb1decSSalil Mehta }
2403e2cb1decSSalil Mehta 
/* hclgevf_config_gro - enable/disable hardware GRO via a firmware command;
 * no-op (returns 0) when the device does not support GRO.
 */
2404b26a6feaSPeng Li static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
2405b26a6feaSPeng Li {
2406b26a6feaSPeng Li struct hclgevf_cfg_gro_status_cmd *req;
2407b26a6feaSPeng Li struct hclgevf_desc desc;
2408b26a6feaSPeng Li int ret;
2409b26a6feaSPeng Li 
2410b26a6feaSPeng Li if (!hnae3_dev_gro_supported(hdev))
2411b26a6feaSPeng Li return 0;
2412b26a6feaSPeng Li 
2413b26a6feaSPeng Li hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
2414b26a6feaSPeng Li false);
2415b26a6feaSPeng Li req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
2416b26a6feaSPeng Li 
2417b26a6feaSPeng Li req->gro_en = cpu_to_le16(en ? 1 : 0);
2418b26a6feaSPeng Li 
2419b26a6feaSPeng Li ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2420b26a6feaSPeng Li if (ret)
2421b26a6feaSPeng Li dev_err(&hdev->pdev->dev,
2422b26a6feaSPeng Li "VF GRO hardware config cmd failed, ret = %d.\n", ret);
2423b26a6feaSPeng Li 
2424b26a6feaSPeng Li return ret;
2425b26a6feaSPeng Li }
2426b26a6feaSPeng Li 
/* hclgevf_rss_init_cfg - initialize the software RSS configuration: default
 * Toeplitz algorithm, and for HW revision >= 0x21 the simple hash algorithm,
 * default hash key and per-protocol tuple settings; finally fill the
 * indirection table round-robin over rss_size queues.
 */
2427944de484SGuojia Liao static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
2428e2cb1decSSalil Mehta {
2429e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
2430944de484SGuojia Liao struct hclgevf_rss_tuple_cfg *tuple_sets;
24314093d1a2SGuangbin Huang u32 i;
2432e2cb1decSSalil Mehta 
2433944de484SGuojia Liao rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
24344093d1a2SGuangbin Huang rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
2435944de484SGuojia Liao tuple_sets = &rss_cfg->rss_tuple_sets;
2436374ad291SJian Shen if (hdev->pdev->revision >= 0x21) {
2437472d7eceSJian Shen rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
2438472d7eceSJian Shen memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
2439374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE);
2440374ad291SJian Shen 
2441944de484SGuojia Liao tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2442944de484SGuojia Liao 
tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2443944de484SGuojia Liao tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
2444944de484SGuojia Liao tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2445944de484SGuojia Liao tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2446944de484SGuojia Liao tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2447944de484SGuojia Liao tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
2448944de484SGuojia Liao tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2449374ad291SJian Shen }
2450374ad291SJian Shen 
24519b2f3477SWeihang Li /* Initialize RSS indirect table */
2452e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
24534093d1a2SGuangbin Huang rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
2454944de484SGuojia Liao }
2455944de484SGuojia Liao 
/* hclgevf_rss_init_hw - push the software RSS configuration into hardware:
 * hash algorithm/key and input tuples (revision >= 0x21 only), then the
 * indirection table and the RSS TC mode.
 */
2456944de484SGuojia Liao static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
2457944de484SGuojia Liao {
2458944de484SGuojia Liao struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
2459944de484SGuojia Liao int ret;
2460944de484SGuojia Liao 
2461944de484SGuojia Liao if (hdev->pdev->revision >= 0x21) {
2462944de484SGuojia Liao ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
2463944de484SGuojia Liao rss_cfg->rss_hash_key);
2464944de484SGuojia Liao if (ret)
2465944de484SGuojia Liao return ret;
2466944de484SGuojia Liao 
2467944de484SGuojia Liao ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
2468944de484SGuojia Liao if (ret)
2469944de484SGuojia Liao return ret;
2470944de484SGuojia Liao }
2471e2cb1decSSalil Mehta 
2472e2cb1decSSalil Mehta ret = hclgevf_set_rss_indir_table(hdev);
2473e2cb1decSSalil Mehta if (ret)
2474e2cb1decSSalil Mehta return ret;
2475e2cb1decSSalil Mehta 
24764093d1a2SGuangbin Huang return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
2477e2cb1decSSalil Mehta }
2478e2cb1decSSalil Mehta 
/* hclgevf_init_vlan_config - initial VLAN setup: disable filtering for
 * VLAN 0 (802.1Q) via the PF.
 */
2479e2cb1decSSalil Mehta static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
2480e2cb1decSSalil Mehta {
2481e2cb1decSSalil Mehta return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
2482e2cb1decSSalil Mehta false);
2483e2cb1decSSalil Mehta }
2484e2cb1decSSalil Mehta 
/* hclgevf_flush_link_update - busy-wait (bounded by
 * HCLGEVF_FLUSH_LINK_TIMEOUT iterations) until any in-flight link update in
 * the service task has finished or a new service pass has run.
 */
2485ff200099SYunsheng Lin static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
2486ff200099SYunsheng Lin {
2487ff200099SYunsheng Lin #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000
2488ff200099SYunsheng Lin 
2489ff200099SYunsheng Lin unsigned long last = hdev->serv_processed_cnt;
2490ff200099SYunsheng Lin int i = 0;
2491ff200099SYunsheng Lin 
2492ff200099SYunsheng Lin while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
2493ff200099SYunsheng Lin i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
2494ff200099SYunsheng Lin last == hdev->serv_processed_cnt)
2495ff200099SYunsheng Lin usleep_range(1, 1);
2496ff200099SYunsheng Lin }
2497ff200099SYunsheng Lin 
/* hclgevf_set_timer_task - start or stop the periodic service task; on stop,
 * mark the device DOWN and flush any link update still in progress.
 */
24988cdb992fSJian Shen static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
24998cdb992fSJian Shen {
25008cdb992fSJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
25018cdb992fSJian Shen 
25028cdb992fSJian Shen if (enable) {
2503ff200099SYunsheng Lin hclgevf_task_schedule(hdev, 0);
25048cdb992fSJian Shen } else {
2505b3c3fe8eSYunsheng Lin set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2506ff200099SYunsheng Lin 
2507ff200099SYunsheng Lin /* flush memory to make sure DOWN is seen by service task */
2508ff200099SYunsheng Lin smp_mb__before_atomic();
2509ff200099SYunsheng Lin hclgevf_flush_link_update(hdev);
25108cdb992fSJian Shen }
25118cdb992fSJian Shen }
25128cdb992fSJian Shen 
/* hclgevf_ae_start - bring the ae device up: reset TQP stats, request link
 * status/mode from the PF and clear the DOWN state bit.
 */
2513e2cb1decSSalil Mehta static int hclgevf_ae_start(struct hnae3_handle *handle)
2514e2cb1decSSalil Mehta {
2515e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2516e2cb1decSSalil Mehta 
2517e2cb1decSSalil Mehta hclgevf_reset_tqp_stats(handle);
2518e2cb1decSSalil Mehta 
2519e2cb1decSSalil Mehta hclgevf_request_link_info(hdev);
2520e2cb1decSSalil Mehta 
25219194d18bSliuzhongzhu 
hclgevf_update_link_mode(hdev);
25229194d18bSliuzhongzhu 
2523e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2524e2cb1decSSalil Mehta 
2525e2cb1decSSalil Mehta return 0;
2526e2cb1decSSalil Mehta }
2527e2cb1decSSalil Mehta 
/* hclgevf_ae_stop - bring the ae device down: set DOWN, reset each TQP
 * (skipped during a VF reset, when the PF handles it), clear TQP stats and
 * report link down.
 */
2528e2cb1decSSalil Mehta static void hclgevf_ae_stop(struct hnae3_handle *handle)
2529e2cb1decSSalil Mehta {
2530e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
253139cfbc9cSHuazhong Tan int i;
2532e2cb1decSSalil Mehta 
25332f7e4896SFuyun Liang set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
25342f7e4896SFuyun Liang 
2535146e92c1SHuazhong Tan if (hdev->reset_type != HNAE3_VF_RESET)
253639cfbc9cSHuazhong Tan for (i = 0; i < handle->kinfo.num_tqps; i++)
2537146e92c1SHuazhong Tan if (hclgevf_reset_tqp(handle, i))
2538146e92c1SHuazhong Tan break;
253939cfbc9cSHuazhong Tan 
2540e2cb1decSSalil Mehta hclgevf_reset_tqp_stats(handle);
25418cc6c1f7SFuyun Liang hclgevf_update_link_status(hdev, 0);
2542e2cb1decSSalil Mehta }
2543e2cb1decSSalil Mehta 
/* hclgevf_set_alive - tell the PF (via a no-response mailbox message)
 * whether this VF is alive.
 */
2544a6d818e3SYunsheng Lin static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
2545a6d818e3SYunsheng Lin {
2546d3410018SYufeng Mo #define HCLGEVF_STATE_ALIVE 1
2547d3410018SYufeng Mo #define HCLGEVF_STATE_NOT_ALIVE 0
2548a6d818e3SYunsheng Lin 
2549d3410018SYufeng Mo struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2550d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg;
2551d3410018SYufeng Mo 
2552d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
2553d3410018SYufeng Mo send_msg.data[0] = alive ?
HCLGEVF_STATE_ALIVE :
2554d3410018SYufeng Mo HCLGEVF_STATE_NOT_ALIVE;
2555d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2556a6d818e3SYunsheng Lin }
2557a6d818e3SYunsheng Lin 
/* hclgevf_client_start - client start hook: report this VF alive to the PF. */
2558a6d818e3SYunsheng Lin static int hclgevf_client_start(struct hnae3_handle *handle)
2559a6d818e3SYunsheng Lin {
2560e233516eSHuazhong Tan int ret;
2561e233516eSHuazhong Tan 
2562e233516eSHuazhong Tan ret = hclgevf_set_alive(handle, true);
2563e233516eSHuazhong Tan if (ret)
2564e233516eSHuazhong Tan return ret;
2565a6d818e3SYunsheng Lin 
2566e233516eSHuazhong Tan return 0;
2567a6d818e3SYunsheng Lin }
2568a6d818e3SYunsheng Lin 
/* hclgevf_client_stop - client stop hook: best-effort "not alive" report to
 * the PF; only warns on failure.
 */
2569a6d818e3SYunsheng Lin static void hclgevf_client_stop(struct hnae3_handle *handle)
2570a6d818e3SYunsheng Lin {
2571a6d818e3SYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2572a6d818e3SYunsheng Lin int ret;
2573a6d818e3SYunsheng Lin 
2574a6d818e3SYunsheng Lin ret = hclgevf_set_alive(handle, false);
2575a6d818e3SYunsheng Lin if (ret)
2576a6d818e3SYunsheng Lin dev_warn(&hdev->pdev->dev,
2577a6d818e3SYunsheng Lin "%s failed %d\n", __func__, ret);
2578a6d818e3SYunsheng Lin }
2579a6d818e3SYunsheng Lin 
/* hclgevf_state_init - initialize driver state bits, the delayed service
 * work, the mailbox mutex, the reset semaphore and the MAC table lists;
 * leaves the device marked DOWN.
 */
2580e2cb1decSSalil Mehta static void hclgevf_state_init(struct hclgevf_dev *hdev)
2581e2cb1decSSalil Mehta {
2582e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
2583e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2584d5432455SGuojia Liao clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
2585e2cb1decSSalil Mehta 
2586b3c3fe8eSYunsheng Lin INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
258735a1e503SSalil Mehta 
2588e2cb1decSSalil Mehta mutex_init(&hdev->mbx_resp.mbx_mutex);
2589f28368bbSHuazhong Tan sema_init(&hdev->reset_sem, 1);
2590e2cb1decSSalil Mehta 
2591ee4bcd3bSJian Shen spin_lock_init(&hdev->mac_table.mac_list_lock);
2592ee4bcd3bSJian Shen INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
2593ee4bcd3bSJian Shen 
INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
2594ee4bcd3bSJian Shen 
2595e2cb1decSSalil Mehta /* bring the device down */
2596e2cb1decSSalil Mehta set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2597e2cb1decSSalil Mehta }
2598e2cb1decSSalil Mehta 
/* hclgevf_state_uninit - mark the device DOWN and REMOVING, cancel the
 * service work and destroy the mailbox mutex.
 */
2599e2cb1decSSalil Mehta static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
2600e2cb1decSSalil Mehta {
2601e2cb1decSSalil Mehta set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2602acfc3d55SHuazhong Tan set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
2603e2cb1decSSalil Mehta 
2604b3c3fe8eSYunsheng Lin if (hdev->service_task.work.func)
2605b3c3fe8eSYunsheng Lin cancel_delayed_work_sync(&hdev->service_task);
2606e2cb1decSSalil Mehta 
2607e2cb1decSSalil Mehta mutex_destroy(&hdev->mbx_resp.mbx_mutex);
2608e2cb1decSSalil Mehta }
2609e2cb1decSSalil Mehta 
/* hclgevf_init_msi - allocate MSI/MSI-X vectors (MSI-X only when RoCE is
 * supported, with the RoCE offset as the minimum) and the per-vector
 * status/irq bookkeeping arrays.
 */
2610e2cb1decSSalil Mehta static int hclgevf_init_msi(struct hclgevf_dev *hdev)
2611e2cb1decSSalil Mehta {
2612e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev;
2613e2cb1decSSalil Mehta int vectors;
2614e2cb1decSSalil Mehta int i;
2615e2cb1decSSalil Mehta 
2616580a05f9SYonglong Liu if (hnae3_dev_roce_supported(hdev))
261707acf909SJian Shen vectors = pci_alloc_irq_vectors(pdev,
261807acf909SJian Shen hdev->roce_base_msix_offset + 1,
261907acf909SJian Shen hdev->num_msi,
262007acf909SJian Shen PCI_IRQ_MSIX);
262107acf909SJian Shen else
2622580a05f9SYonglong Liu vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2623580a05f9SYonglong Liu hdev->num_msi,
2624e2cb1decSSalil Mehta PCI_IRQ_MSI | PCI_IRQ_MSIX);
262507acf909SJian Shen 
2626e2cb1decSSalil Mehta if (vectors < 0) {
2627e2cb1decSSalil Mehta dev_err(&pdev->dev,
2628e2cb1decSSalil Mehta "failed(%d) to allocate MSI/MSI-X vectors\n",
2629e2cb1decSSalil Mehta vectors);
2630e2cb1decSSalil Mehta return vectors;
2631e2cb1decSSalil Mehta }
2632e2cb1decSSalil Mehta if (vectors < hdev->num_msi)
2633e2cb1decSSalil Mehta dev_warn(&hdev->pdev->dev,
2634adcf738bSGuojia Liao "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2635e2cb1decSSalil Mehta hdev->num_msi, vectors);
2636e2cb1decSSalil Mehta 
2637e2cb1decSSalil Mehta hdev->num_msi = vectors;
2638e2cb1decSSalil Mehta hdev->num_msi_left = vectors;
2639580a05f9SYonglong Liu 
2640e2cb1decSSalil Mehta hdev->base_msi_vector = pdev->irq;
264107acf909SJian Shen hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
2642e2cb1decSSalil Mehta 
2643e2cb1decSSalil Mehta hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2644e2cb1decSSalil Mehta sizeof(u16), GFP_KERNEL);
2645e2cb1decSSalil Mehta if (!hdev->vector_status) {
2646e2cb1decSSalil Mehta pci_free_irq_vectors(pdev);
2647e2cb1decSSalil Mehta return -ENOMEM;
2648e2cb1decSSalil Mehta }
2649e2cb1decSSalil Mehta 
2650e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++)
2651e2cb1decSSalil Mehta hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
2652e2cb1decSSalil Mehta 
2653e2cb1decSSalil Mehta hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2654e2cb1decSSalil Mehta sizeof(int), GFP_KERNEL);
2655e2cb1decSSalil Mehta if (!hdev->vector_irq) {
2656862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_status);
2657e2cb1decSSalil Mehta pci_free_irq_vectors(pdev);
2658e2cb1decSSalil Mehta return -ENOMEM;
2659e2cb1decSSalil Mehta }
2660e2cb1decSSalil Mehta 
2661e2cb1decSSalil Mehta return 0;
2662e2cb1decSSalil Mehta }
2663e2cb1decSSalil Mehta 
/* hclgevf_uninit_msi - free the vector bookkeeping arrays and release all
 * MSI/MSI-X vectors.
 */
2664e2cb1decSSalil Mehta static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
2665e2cb1decSSalil Mehta {
2666e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev;
2667e2cb1decSSalil Mehta 
2668862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_status);
2669862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_irq);
2670e2cb1decSSalil Mehta pci_free_irq_vectors(pdev);
2671e2cb1decSSalil Mehta }
2672e2cb1decSSalil Mehta 
/* hclgevf_misc_irq_init - request the misc (vector 0) interrupt, clear any
 * stale event cause and enable the vector.
 */
2673e2cb1decSSalil Mehta static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
2674e2cb1decSSalil Mehta {
2675cdd332acSGuojia Liao int ret;
2676e2cb1decSSalil Mehta 
2677e2cb1decSSalil Mehta 
hclgevf_get_misc_vector(hdev); 2678e2cb1decSSalil Mehta 2679f97c4d82SYonglong Liu snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2680f97c4d82SYonglong Liu HCLGEVF_NAME, pci_name(hdev->pdev)); 2681e2cb1decSSalil Mehta ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2682f97c4d82SYonglong Liu 0, hdev->misc_vector.name, hdev); 2683e2cb1decSSalil Mehta if (ret) { 2684e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2685e2cb1decSSalil Mehta hdev->misc_vector.vector_irq); 2686e2cb1decSSalil Mehta return ret; 2687e2cb1decSSalil Mehta } 2688e2cb1decSSalil Mehta 26891819e409SXi Wang hclgevf_clear_event_cause(hdev, 0); 26901819e409SXi Wang 2691e2cb1decSSalil Mehta /* enable misc. vector(vector 0) */ 2692e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, true); 2693e2cb1decSSalil Mehta 2694e2cb1decSSalil Mehta return ret; 2695e2cb1decSSalil Mehta } 2696e2cb1decSSalil Mehta 2697e2cb1decSSalil Mehta static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2698e2cb1decSSalil Mehta { 2699e2cb1decSSalil Mehta /* disable misc vector(vector 0) */ 2700e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, false); 27011819e409SXi Wang synchronize_irq(hdev->misc_vector.vector_irq); 2702e2cb1decSSalil Mehta free_irq(hdev->misc_vector.vector_irq, hdev); 2703e2cb1decSSalil Mehta hclgevf_free_vector(hdev, 0); 2704e2cb1decSSalil Mehta } 2705e2cb1decSSalil Mehta 2706bb87be87SYonglong Liu static void hclgevf_info_show(struct hclgevf_dev *hdev) 2707bb87be87SYonglong Liu { 2708bb87be87SYonglong Liu struct device *dev = &hdev->pdev->dev; 2709bb87be87SYonglong Liu 2710bb87be87SYonglong Liu dev_info(dev, "VF info begin:\n"); 2711bb87be87SYonglong Liu 2712adcf738bSGuojia Liao dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2713adcf738bSGuojia Liao dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2714adcf738bSGuojia Liao dev_info(dev, "Desc num per RX queue: 
%u\n", hdev->num_rx_desc); 2715adcf738bSGuojia Liao dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2716adcf738bSGuojia Liao dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2717adcf738bSGuojia Liao dev_info(dev, "PF media type of this VF: %u\n", 2718bb87be87SYonglong Liu hdev->hw.mac.media_type); 2719bb87be87SYonglong Liu 2720bb87be87SYonglong Liu dev_info(dev, "VF info end.\n"); 2721bb87be87SYonglong Liu } 2722bb87be87SYonglong Liu 27231db58f86SHuazhong Tan static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 27241db58f86SHuazhong Tan struct hnae3_client *client) 27251db58f86SHuazhong Tan { 27261db58f86SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 27271db58f86SHuazhong Tan int ret; 27281db58f86SHuazhong Tan 27291db58f86SHuazhong Tan ret = client->ops->init_instance(&hdev->nic); 27301db58f86SHuazhong Tan if (ret) 27311db58f86SHuazhong Tan return ret; 27321db58f86SHuazhong Tan 27331db58f86SHuazhong Tan set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 27341db58f86SHuazhong Tan hnae3_set_client_init_flag(client, ae_dev, 1); 27351db58f86SHuazhong Tan 27361db58f86SHuazhong Tan if (netif_msg_drv(&hdev->nic)) 27371db58f86SHuazhong Tan hclgevf_info_show(hdev); 27381db58f86SHuazhong Tan 27391db58f86SHuazhong Tan return 0; 27401db58f86SHuazhong Tan } 27411db58f86SHuazhong Tan 27421db58f86SHuazhong Tan static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 27431db58f86SHuazhong Tan struct hnae3_client *client) 27441db58f86SHuazhong Tan { 27451db58f86SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 27461db58f86SHuazhong Tan int ret; 27471db58f86SHuazhong Tan 27481db58f86SHuazhong Tan if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 27491db58f86SHuazhong Tan !hdev->nic_client) 27501db58f86SHuazhong Tan return 0; 27511db58f86SHuazhong Tan 27521db58f86SHuazhong Tan ret = hclgevf_init_roce_base_info(hdev); 27531db58f86SHuazhong Tan if (ret) 27541db58f86SHuazhong Tan return ret; 
27551db58f86SHuazhong Tan 27561db58f86SHuazhong Tan ret = client->ops->init_instance(&hdev->roce); 27571db58f86SHuazhong Tan if (ret) 27581db58f86SHuazhong Tan return ret; 27591db58f86SHuazhong Tan 27601db58f86SHuazhong Tan hnae3_set_client_init_flag(client, ae_dev, 1); 27611db58f86SHuazhong Tan 27621db58f86SHuazhong Tan return 0; 27631db58f86SHuazhong Tan } 27641db58f86SHuazhong Tan 2765e718a93fSPeng Li static int hclgevf_init_client_instance(struct hnae3_client *client, 2766e718a93fSPeng Li struct hnae3_ae_dev *ae_dev) 2767e2cb1decSSalil Mehta { 2768e718a93fSPeng Li struct hclgevf_dev *hdev = ae_dev->priv; 2769e2cb1decSSalil Mehta int ret; 2770e2cb1decSSalil Mehta 2771e2cb1decSSalil Mehta switch (client->type) { 2772e2cb1decSSalil Mehta case HNAE3_CLIENT_KNIC: 2773e2cb1decSSalil Mehta hdev->nic_client = client; 2774e2cb1decSSalil Mehta hdev->nic.client = client; 2775e2cb1decSSalil Mehta 27761db58f86SHuazhong Tan ret = hclgevf_init_nic_client_instance(ae_dev, client); 2777e2cb1decSSalil Mehta if (ret) 277849dd8054SJian Shen goto clear_nic; 2779e2cb1decSSalil Mehta 27801db58f86SHuazhong Tan ret = hclgevf_init_roce_client_instance(ae_dev, 27811db58f86SHuazhong Tan hdev->roce_client); 2782e2cb1decSSalil Mehta if (ret) 278349dd8054SJian Shen goto clear_roce; 2784d9f28fc2SJian Shen 2785e2cb1decSSalil Mehta break; 2786e2cb1decSSalil Mehta case HNAE3_CLIENT_ROCE: 2787544a7bcdSLijun Ou if (hnae3_dev_roce_supported(hdev)) { 2788e2cb1decSSalil Mehta hdev->roce_client = client; 2789e2cb1decSSalil Mehta hdev->roce.client = client; 2790544a7bcdSLijun Ou } 2791e2cb1decSSalil Mehta 27921db58f86SHuazhong Tan ret = hclgevf_init_roce_client_instance(ae_dev, client); 2793e2cb1decSSalil Mehta if (ret) 279449dd8054SJian Shen goto clear_roce; 2795e2cb1decSSalil Mehta 2796fa7a4bd5SJian Shen break; 2797fa7a4bd5SJian Shen default: 2798fa7a4bd5SJian Shen return -EINVAL; 2799e2cb1decSSalil Mehta } 2800e2cb1decSSalil Mehta 2801e2cb1decSSalil Mehta return 0; 280249dd8054SJian Shen 
280349dd8054SJian Shen clear_nic: 280449dd8054SJian Shen hdev->nic_client = NULL; 280549dd8054SJian Shen hdev->nic.client = NULL; 280649dd8054SJian Shen return ret; 280749dd8054SJian Shen clear_roce: 280849dd8054SJian Shen hdev->roce_client = NULL; 280949dd8054SJian Shen hdev->roce.client = NULL; 281049dd8054SJian Shen return ret; 2811e2cb1decSSalil Mehta } 2812e2cb1decSSalil Mehta 2813e718a93fSPeng Li static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2814e718a93fSPeng Li struct hnae3_ae_dev *ae_dev) 2815e2cb1decSSalil Mehta { 2816e718a93fSPeng Li struct hclgevf_dev *hdev = ae_dev->priv; 2817e718a93fSPeng Li 2818e2cb1decSSalil Mehta /* un-init roce, if it exists */ 281949dd8054SJian Shen if (hdev->roce_client) { 2820e2cb1decSSalil Mehta hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 282149dd8054SJian Shen hdev->roce_client = NULL; 282249dd8054SJian Shen hdev->roce.client = NULL; 282349dd8054SJian Shen } 2824e2cb1decSSalil Mehta 2825e2cb1decSSalil Mehta /* un-init nic/unic, if this was not called by roce client */ 282649dd8054SJian Shen if (client->ops->uninit_instance && hdev->nic_client && 282749dd8054SJian Shen client->type != HNAE3_CLIENT_ROCE) { 282825d1817cSHuazhong Tan clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 282925d1817cSHuazhong Tan 2830e2cb1decSSalil Mehta client->ops->uninit_instance(&hdev->nic, 0); 283149dd8054SJian Shen hdev->nic_client = NULL; 283249dd8054SJian Shen hdev->nic.client = NULL; 283349dd8054SJian Shen } 2834e2cb1decSSalil Mehta } 2835e2cb1decSSalil Mehta 2836e2cb1decSSalil Mehta static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2837e2cb1decSSalil Mehta { 2838e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 2839e2cb1decSSalil Mehta struct hclgevf_hw *hw; 2840e2cb1decSSalil Mehta int ret; 2841e2cb1decSSalil Mehta 2842e2cb1decSSalil Mehta ret = pci_enable_device(pdev); 2843e2cb1decSSalil Mehta if (ret) { 2844e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed to enable PCI device\n"); 
28453e249d3bSFuyun Liang return ret; 2846e2cb1decSSalil Mehta } 2847e2cb1decSSalil Mehta 2848e2cb1decSSalil Mehta ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2849e2cb1decSSalil Mehta if (ret) { 2850e2cb1decSSalil Mehta dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2851e2cb1decSSalil Mehta goto err_disable_device; 2852e2cb1decSSalil Mehta } 2853e2cb1decSSalil Mehta 2854e2cb1decSSalil Mehta ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2855e2cb1decSSalil Mehta if (ret) { 2856e2cb1decSSalil Mehta dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2857e2cb1decSSalil Mehta goto err_disable_device; 2858e2cb1decSSalil Mehta } 2859e2cb1decSSalil Mehta 2860e2cb1decSSalil Mehta pci_set_master(pdev); 2861e2cb1decSSalil Mehta hw = &hdev->hw; 2862e2cb1decSSalil Mehta hw->hdev = hdev; 28632e1ea493SPeng Li hw->io_base = pci_iomap(pdev, 2, 0); 2864e2cb1decSSalil Mehta if (!hw->io_base) { 2865e2cb1decSSalil Mehta dev_err(&pdev->dev, "can't map configuration register space\n"); 2866e2cb1decSSalil Mehta ret = -ENOMEM; 2867e2cb1decSSalil Mehta goto err_clr_master; 2868e2cb1decSSalil Mehta } 2869e2cb1decSSalil Mehta 2870e2cb1decSSalil Mehta return 0; 2871e2cb1decSSalil Mehta 2872e2cb1decSSalil Mehta err_clr_master: 2873e2cb1decSSalil Mehta pci_clear_master(pdev); 2874e2cb1decSSalil Mehta pci_release_regions(pdev); 2875e2cb1decSSalil Mehta err_disable_device: 2876e2cb1decSSalil Mehta pci_disable_device(pdev); 28773e249d3bSFuyun Liang 2878e2cb1decSSalil Mehta return ret; 2879e2cb1decSSalil Mehta } 2880e2cb1decSSalil Mehta 2881e2cb1decSSalil Mehta static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2882e2cb1decSSalil Mehta { 2883e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 2884e2cb1decSSalil Mehta 2885e2cb1decSSalil Mehta pci_iounmap(pdev, hdev->hw.io_base); 2886e2cb1decSSalil Mehta pci_clear_master(pdev); 2887e2cb1decSSalil Mehta pci_release_regions(pdev); 2888e2cb1decSSalil Mehta pci_disable_device(pdev); 
2889e2cb1decSSalil Mehta } 2890e2cb1decSSalil Mehta 289107acf909SJian Shen static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 289207acf909SJian Shen { 289307acf909SJian Shen struct hclgevf_query_res_cmd *req; 289407acf909SJian Shen struct hclgevf_desc desc; 289507acf909SJian Shen int ret; 289607acf909SJian Shen 289707acf909SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 289807acf909SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 289907acf909SJian Shen if (ret) { 290007acf909SJian Shen dev_err(&hdev->pdev->dev, 290107acf909SJian Shen "query vf resource failed, ret = %d.\n", ret); 290207acf909SJian Shen return ret; 290307acf909SJian Shen } 290407acf909SJian Shen 290507acf909SJian Shen req = (struct hclgevf_query_res_cmd *)desc.data; 290607acf909SJian Shen 2907580a05f9SYonglong Liu if (hnae3_dev_roce_supported(hdev)) { 290807acf909SJian Shen hdev->roce_base_msix_offset = 290960df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 291007acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_M, 291107acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_S); 291207acf909SJian Shen hdev->num_roce_msix = 291360df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 291407acf909SJian Shen HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 291507acf909SJian Shen 2916580a05f9SYonglong Liu /* nic's msix numbers is always equals to the roce's. */ 2917580a05f9SYonglong Liu hdev->num_nic_msix = hdev->num_roce_msix; 2918580a05f9SYonglong Liu 291907acf909SJian Shen /* VF should have NIC vectors and Roce vectors, NIC vectors 292007acf909SJian Shen * are queued before Roce vectors. The offset is fixed to 64. 
292107acf909SJian Shen */ 292207acf909SJian Shen hdev->num_msi = hdev->num_roce_msix + 292307acf909SJian Shen hdev->roce_base_msix_offset; 292407acf909SJian Shen } else { 292507acf909SJian Shen hdev->num_msi = 292660df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 292707acf909SJian Shen HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2928580a05f9SYonglong Liu 2929580a05f9SYonglong Liu hdev->num_nic_msix = hdev->num_msi; 2930580a05f9SYonglong Liu } 2931580a05f9SYonglong Liu 2932580a05f9SYonglong Liu if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 2933580a05f9SYonglong Liu dev_err(&hdev->pdev->dev, 2934580a05f9SYonglong Liu "Just %u msi resources, not enough for vf(min:2).\n", 2935580a05f9SYonglong Liu hdev->num_nic_msix); 2936580a05f9SYonglong Liu return -EINVAL; 293707acf909SJian Shen } 293807acf909SJian Shen 293907acf909SJian Shen return 0; 294007acf909SJian Shen } 294107acf909SJian Shen 2942862d969aSHuazhong Tan static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2943862d969aSHuazhong Tan { 2944862d969aSHuazhong Tan struct pci_dev *pdev = hdev->pdev; 2945862d969aSHuazhong Tan int ret = 0; 2946862d969aSHuazhong Tan 2947862d969aSHuazhong Tan if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2948862d969aSHuazhong Tan test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2949862d969aSHuazhong Tan hclgevf_misc_irq_uninit(hdev); 2950862d969aSHuazhong Tan hclgevf_uninit_msi(hdev); 2951862d969aSHuazhong Tan clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2952862d969aSHuazhong Tan } 2953862d969aSHuazhong Tan 2954862d969aSHuazhong Tan if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2955862d969aSHuazhong Tan pci_set_master(pdev); 2956862d969aSHuazhong Tan ret = hclgevf_init_msi(hdev); 2957862d969aSHuazhong Tan if (ret) { 2958862d969aSHuazhong Tan dev_err(&pdev->dev, 2959862d969aSHuazhong Tan "failed(%d) to init MSI/MSI-X\n", ret); 2960862d969aSHuazhong Tan return ret; 2961862d969aSHuazhong Tan } 2962862d969aSHuazhong Tan 2963862d969aSHuazhong 
Tan ret = hclgevf_misc_irq_init(hdev); 2964862d969aSHuazhong Tan if (ret) { 2965862d969aSHuazhong Tan hclgevf_uninit_msi(hdev); 2966862d969aSHuazhong Tan dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2967862d969aSHuazhong Tan ret); 2968862d969aSHuazhong Tan return ret; 2969862d969aSHuazhong Tan } 2970862d969aSHuazhong Tan 2971862d969aSHuazhong Tan set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2972862d969aSHuazhong Tan } 2973862d969aSHuazhong Tan 2974862d969aSHuazhong Tan return ret; 2975862d969aSHuazhong Tan } 2976862d969aSHuazhong Tan 29779c6f7085SHuazhong Tan static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2978e2cb1decSSalil Mehta { 29797a01c897SSalil Mehta struct pci_dev *pdev = hdev->pdev; 2980e2cb1decSSalil Mehta int ret; 2981e2cb1decSSalil Mehta 2982862d969aSHuazhong Tan ret = hclgevf_pci_reset(hdev); 2983862d969aSHuazhong Tan if (ret) { 2984862d969aSHuazhong Tan dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2985862d969aSHuazhong Tan return ret; 2986862d969aSHuazhong Tan } 2987862d969aSHuazhong Tan 29889c6f7085SHuazhong Tan ret = hclgevf_cmd_init(hdev); 29899c6f7085SHuazhong Tan if (ret) { 29909c6f7085SHuazhong Tan dev_err(&pdev->dev, "cmd failed %d\n", ret); 29919c6f7085SHuazhong Tan return ret; 29927a01c897SSalil Mehta } 2993e2cb1decSSalil Mehta 29949c6f7085SHuazhong Tan ret = hclgevf_rss_init_hw(hdev); 29959c6f7085SHuazhong Tan if (ret) { 29969c6f7085SHuazhong Tan dev_err(&hdev->pdev->dev, 29979c6f7085SHuazhong Tan "failed(%d) to initialize RSS\n", ret); 29989c6f7085SHuazhong Tan return ret; 29999c6f7085SHuazhong Tan } 30009c6f7085SHuazhong Tan 3001b26a6feaSPeng Li ret = hclgevf_config_gro(hdev, true); 3002b26a6feaSPeng Li if (ret) 3003b26a6feaSPeng Li return ret; 3004b26a6feaSPeng Li 30059c6f7085SHuazhong Tan ret = hclgevf_init_vlan_config(hdev); 30069c6f7085SHuazhong Tan if (ret) { 30079c6f7085SHuazhong Tan dev_err(&hdev->pdev->dev, 30089c6f7085SHuazhong Tan "failed(%d) to initialize VLAN config\n", ret); 
30099c6f7085SHuazhong Tan return ret; 30109c6f7085SHuazhong Tan } 30119c6f7085SHuazhong Tan 3012c631c696SJian Shen set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3013c631c696SJian Shen 30149c6f7085SHuazhong Tan dev_info(&hdev->pdev->dev, "Reset done\n"); 30159c6f7085SHuazhong Tan 30169c6f7085SHuazhong Tan return 0; 30179c6f7085SHuazhong Tan } 30189c6f7085SHuazhong Tan 30199c6f7085SHuazhong Tan static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 30209c6f7085SHuazhong Tan { 30219c6f7085SHuazhong Tan struct pci_dev *pdev = hdev->pdev; 30229c6f7085SHuazhong Tan int ret; 30239c6f7085SHuazhong Tan 3024e2cb1decSSalil Mehta ret = hclgevf_pci_init(hdev); 302560df7e91SHuazhong Tan if (ret) 3026e2cb1decSSalil Mehta return ret; 3027e2cb1decSSalil Mehta 30288b0195a3SHuazhong Tan ret = hclgevf_cmd_queue_init(hdev); 302960df7e91SHuazhong Tan if (ret) 30308b0195a3SHuazhong Tan goto err_cmd_queue_init; 30318b0195a3SHuazhong Tan 3032eddf0462SYunsheng Lin ret = hclgevf_cmd_init(hdev); 3033eddf0462SYunsheng Lin if (ret) 3034eddf0462SYunsheng Lin goto err_cmd_init; 3035eddf0462SYunsheng Lin 303607acf909SJian Shen /* Get vf resource */ 303707acf909SJian Shen ret = hclgevf_query_vf_resource(hdev); 303860df7e91SHuazhong Tan if (ret) 30398b0195a3SHuazhong Tan goto err_cmd_init; 304007acf909SJian Shen 304107acf909SJian Shen ret = hclgevf_init_msi(hdev); 304207acf909SJian Shen if (ret) { 304307acf909SJian Shen dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 30448b0195a3SHuazhong Tan goto err_cmd_init; 304507acf909SJian Shen } 304607acf909SJian Shen 304707acf909SJian Shen hclgevf_state_init(hdev); 3048dea846e8SHuazhong Tan hdev->reset_level = HNAE3_VF_FUNC_RESET; 3049afb6afdbSHuazhong Tan hdev->reset_type = HNAE3_NONE_RESET; 305007acf909SJian Shen 3051e2cb1decSSalil Mehta ret = hclgevf_misc_irq_init(hdev); 305260df7e91SHuazhong Tan if (ret) 3053e2cb1decSSalil Mehta goto err_misc_irq_init; 3054e2cb1decSSalil Mehta 3055862d969aSHuazhong Tan 
set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3056862d969aSHuazhong Tan 3057e2cb1decSSalil Mehta ret = hclgevf_configure(hdev); 3058e2cb1decSSalil Mehta if (ret) { 3059e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 3060e2cb1decSSalil Mehta goto err_config; 3061e2cb1decSSalil Mehta } 3062e2cb1decSSalil Mehta 3063e2cb1decSSalil Mehta ret = hclgevf_alloc_tqps(hdev); 3064e2cb1decSSalil Mehta if (ret) { 3065e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 3066e2cb1decSSalil Mehta goto err_config; 3067e2cb1decSSalil Mehta } 3068e2cb1decSSalil Mehta 3069e2cb1decSSalil Mehta ret = hclgevf_set_handle_info(hdev); 307060df7e91SHuazhong Tan if (ret) 3071e2cb1decSSalil Mehta goto err_config; 3072e2cb1decSSalil Mehta 3073b26a6feaSPeng Li ret = hclgevf_config_gro(hdev, true); 3074b26a6feaSPeng Li if (ret) 3075b26a6feaSPeng Li goto err_config; 3076b26a6feaSPeng Li 3077e2cb1decSSalil Mehta /* Initialize RSS for this VF */ 3078944de484SGuojia Liao hclgevf_rss_init_cfg(hdev); 3079e2cb1decSSalil Mehta ret = hclgevf_rss_init_hw(hdev); 3080e2cb1decSSalil Mehta if (ret) { 3081e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 3082e2cb1decSSalil Mehta "failed(%d) to initialize RSS\n", ret); 3083e2cb1decSSalil Mehta goto err_config; 3084e2cb1decSSalil Mehta } 3085e2cb1decSSalil Mehta 3086e2cb1decSSalil Mehta ret = hclgevf_init_vlan_config(hdev); 3087e2cb1decSSalil Mehta if (ret) { 3088e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 3089e2cb1decSSalil Mehta "failed(%d) to initialize VLAN config\n", ret); 3090e2cb1decSSalil Mehta goto err_config; 3091e2cb1decSSalil Mehta } 3092e2cb1decSSalil Mehta 30930742ed7cSHuazhong Tan hdev->last_reset_time = jiffies; 309408d80a4cSHuazhong Tan dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 309508d80a4cSHuazhong Tan HCLGEVF_DRIVER_NAME); 3096e2cb1decSSalil Mehta 3097ff200099SYunsheng Lin hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 3098ff200099SYunsheng Lin 
3099e2cb1decSSalil Mehta return 0; 3100e2cb1decSSalil Mehta 3101e2cb1decSSalil Mehta err_config: 3102e2cb1decSSalil Mehta hclgevf_misc_irq_uninit(hdev); 3103e2cb1decSSalil Mehta err_misc_irq_init: 3104e2cb1decSSalil Mehta hclgevf_state_uninit(hdev); 3105e2cb1decSSalil Mehta hclgevf_uninit_msi(hdev); 310607acf909SJian Shen err_cmd_init: 31078b0195a3SHuazhong Tan hclgevf_cmd_uninit(hdev); 31088b0195a3SHuazhong Tan err_cmd_queue_init: 3109e2cb1decSSalil Mehta hclgevf_pci_uninit(hdev); 3110862d969aSHuazhong Tan clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3111e2cb1decSSalil Mehta return ret; 3112e2cb1decSSalil Mehta } 3113e2cb1decSSalil Mehta 31147a01c897SSalil Mehta static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 3115e2cb1decSSalil Mehta { 3116d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 3117d3410018SYufeng Mo 3118e2cb1decSSalil Mehta hclgevf_state_uninit(hdev); 3119862d969aSHuazhong Tan 3120d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3121d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 312223b4201dSJian Shen 3123862d969aSHuazhong Tan if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3124eddf0462SYunsheng Lin hclgevf_misc_irq_uninit(hdev); 3125e2cb1decSSalil Mehta hclgevf_uninit_msi(hdev); 31267a01c897SSalil Mehta } 31277a01c897SSalil Mehta 3128e3338205SHuazhong Tan hclgevf_pci_uninit(hdev); 3129862d969aSHuazhong Tan hclgevf_cmd_uninit(hdev); 3130ee4bcd3bSJian Shen hclgevf_uninit_mac_list(hdev); 3131862d969aSHuazhong Tan } 3132862d969aSHuazhong Tan 31337a01c897SSalil Mehta static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 31347a01c897SSalil Mehta { 31357a01c897SSalil Mehta struct pci_dev *pdev = ae_dev->pdev; 31367a01c897SSalil Mehta int ret; 31377a01c897SSalil Mehta 31387a01c897SSalil Mehta ret = hclgevf_alloc_hdev(ae_dev); 31397a01c897SSalil Mehta if (ret) { 31407a01c897SSalil Mehta dev_err(&pdev->dev, "hclge device allocation failed\n"); 31417a01c897SSalil Mehta 
return ret; 31427a01c897SSalil Mehta } 31437a01c897SSalil Mehta 31447a01c897SSalil Mehta ret = hclgevf_init_hdev(ae_dev->priv); 3145a6d818e3SYunsheng Lin if (ret) { 31467a01c897SSalil Mehta dev_err(&pdev->dev, "hclge device initialization failed\n"); 31477a01c897SSalil Mehta return ret; 31487a01c897SSalil Mehta } 31497a01c897SSalil Mehta 3150a6d818e3SYunsheng Lin return 0; 3151a6d818e3SYunsheng Lin } 3152a6d818e3SYunsheng Lin 31537a01c897SSalil Mehta static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 31547a01c897SSalil Mehta { 31557a01c897SSalil Mehta struct hclgevf_dev *hdev = ae_dev->priv; 31567a01c897SSalil Mehta 31577a01c897SSalil Mehta hclgevf_uninit_hdev(hdev); 3158e2cb1decSSalil Mehta ae_dev->priv = NULL; 3159e2cb1decSSalil Mehta } 3160e2cb1decSSalil Mehta 3161849e4607SPeng Li static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3162849e4607SPeng Li { 3163849e4607SPeng Li struct hnae3_handle *nic = &hdev->nic; 3164849e4607SPeng Li struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3165849e4607SPeng Li 31668be73621SHuazhong Tan return min_t(u32, hdev->rss_size_max, 31678be73621SHuazhong Tan hdev->num_tqps / kinfo->num_tc); 3168849e4607SPeng Li } 3169849e4607SPeng Li 3170849e4607SPeng Li /** 3171849e4607SPeng Li * hclgevf_get_channels - Get the current channels enabled and max supported. 3172849e4607SPeng Li * @handle: hardware information for network interface 3173849e4607SPeng Li * @ch: ethtool channels structure 3174849e4607SPeng Li * 3175849e4607SPeng Li * We don't support separate tx and rx queues as channels. The other count 3176849e4607SPeng Li * represents how many queues are being used for control. max_combined counts 3177849e4607SPeng Li * how many queue pairs we can support. They may not be mapped 1 to 1 with 3178849e4607SPeng Li * q_vectors since we support a lot more queue pairs than q_vectors. 
3179849e4607SPeng Li **/ 3180849e4607SPeng Li static void hclgevf_get_channels(struct hnae3_handle *handle, 3181849e4607SPeng Li struct ethtool_channels *ch) 3182849e4607SPeng Li { 3183849e4607SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3184849e4607SPeng Li 3185849e4607SPeng Li ch->max_combined = hclgevf_get_max_channels(hdev); 3186849e4607SPeng Li ch->other_count = 0; 3187849e4607SPeng Li ch->max_other = 0; 31888be73621SHuazhong Tan ch->combined_count = handle->kinfo.rss_size; 3189849e4607SPeng Li } 3190849e4607SPeng Li 3191cc719218SPeng Li static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 31920d43bf45SHuazhong Tan u16 *alloc_tqps, u16 *max_rss_size) 3193cc719218SPeng Li { 3194cc719218SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3195cc719218SPeng Li 31960d43bf45SHuazhong Tan *alloc_tqps = hdev->num_tqps; 3197cc719218SPeng Li *max_rss_size = hdev->rss_size_max; 3198cc719218SPeng Li } 3199cc719218SPeng Li 32004093d1a2SGuangbin Huang static void hclgevf_update_rss_size(struct hnae3_handle *handle, 32014093d1a2SGuangbin Huang u32 new_tqps_num) 32024093d1a2SGuangbin Huang { 32034093d1a2SGuangbin Huang struct hnae3_knic_private_info *kinfo = &handle->kinfo; 32044093d1a2SGuangbin Huang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 32054093d1a2SGuangbin Huang u16 max_rss_size; 32064093d1a2SGuangbin Huang 32074093d1a2SGuangbin Huang kinfo->req_rss_size = new_tqps_num; 32084093d1a2SGuangbin Huang 32094093d1a2SGuangbin Huang max_rss_size = min_t(u16, hdev->rss_size_max, 32104093d1a2SGuangbin Huang hdev->num_tqps / kinfo->num_tc); 32114093d1a2SGuangbin Huang 32124093d1a2SGuangbin Huang /* Use the user's configuration when it is not larger than 32134093d1a2SGuangbin Huang * max_rss_size, otherwise, use the maximum specification value. 
32144093d1a2SGuangbin Huang */ 32154093d1a2SGuangbin Huang if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 32164093d1a2SGuangbin Huang kinfo->req_rss_size <= max_rss_size) 32174093d1a2SGuangbin Huang kinfo->rss_size = kinfo->req_rss_size; 32184093d1a2SGuangbin Huang else if (kinfo->rss_size > max_rss_size || 32194093d1a2SGuangbin Huang (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 32204093d1a2SGuangbin Huang kinfo->rss_size = max_rss_size; 32214093d1a2SGuangbin Huang 32224093d1a2SGuangbin Huang kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; 32234093d1a2SGuangbin Huang } 32244093d1a2SGuangbin Huang 32254093d1a2SGuangbin Huang static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 32264093d1a2SGuangbin Huang bool rxfh_configured) 32274093d1a2SGuangbin Huang { 32284093d1a2SGuangbin Huang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 32294093d1a2SGuangbin Huang struct hnae3_knic_private_info *kinfo = &handle->kinfo; 32304093d1a2SGuangbin Huang u16 cur_rss_size = kinfo->rss_size; 32314093d1a2SGuangbin Huang u16 cur_tqps = kinfo->num_tqps; 32324093d1a2SGuangbin Huang u32 *rss_indir; 32334093d1a2SGuangbin Huang unsigned int i; 32344093d1a2SGuangbin Huang int ret; 32354093d1a2SGuangbin Huang 32364093d1a2SGuangbin Huang hclgevf_update_rss_size(handle, new_tqps_num); 32374093d1a2SGuangbin Huang 32384093d1a2SGuangbin Huang ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 32394093d1a2SGuangbin Huang if (ret) 32404093d1a2SGuangbin Huang return ret; 32414093d1a2SGuangbin Huang 32424093d1a2SGuangbin Huang /* RSS indirection table has been configuared by user */ 32434093d1a2SGuangbin Huang if (rxfh_configured) 32444093d1a2SGuangbin Huang goto out; 32454093d1a2SGuangbin Huang 32464093d1a2SGuangbin Huang /* Reinitializes the rss indirect table according to the new RSS size */ 32474093d1a2SGuangbin Huang rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 32484093d1a2SGuangbin Huang if 
(!rss_indir) 32494093d1a2SGuangbin Huang return -ENOMEM; 32504093d1a2SGuangbin Huang 32514093d1a2SGuangbin Huang for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 32524093d1a2SGuangbin Huang rss_indir[i] = i % kinfo->rss_size; 32534093d1a2SGuangbin Huang 3254944de484SGuojia Liao hdev->rss_cfg.rss_size = kinfo->rss_size; 3255944de484SGuojia Liao 32564093d1a2SGuangbin Huang ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 32574093d1a2SGuangbin Huang if (ret) 32584093d1a2SGuangbin Huang dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 32594093d1a2SGuangbin Huang ret); 32604093d1a2SGuangbin Huang 32614093d1a2SGuangbin Huang kfree(rss_indir); 32624093d1a2SGuangbin Huang 32634093d1a2SGuangbin Huang out: 32644093d1a2SGuangbin Huang if (!ret) 32654093d1a2SGuangbin Huang dev_info(&hdev->pdev->dev, 32664093d1a2SGuangbin Huang "Channels changed, rss_size from %u to %u, tqps from %u to %u", 32674093d1a2SGuangbin Huang cur_rss_size, kinfo->rss_size, 32684093d1a2SGuangbin Huang cur_tqps, kinfo->rss_size * kinfo->num_tc); 32694093d1a2SGuangbin Huang 32704093d1a2SGuangbin Huang return ret; 32714093d1a2SGuangbin Huang } 32724093d1a2SGuangbin Huang 3273175ec96bSFuyun Liang static int hclgevf_get_status(struct hnae3_handle *handle) 3274175ec96bSFuyun Liang { 3275175ec96bSFuyun Liang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3276175ec96bSFuyun Liang 3277175ec96bSFuyun Liang return hdev->hw.mac.link; 3278175ec96bSFuyun Liang } 3279175ec96bSFuyun Liang 32804a152de9SFuyun Liang static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 32814a152de9SFuyun Liang u8 *auto_neg, u32 *speed, 32824a152de9SFuyun Liang u8 *duplex) 32834a152de9SFuyun Liang { 32844a152de9SFuyun Liang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 32854a152de9SFuyun Liang 32864a152de9SFuyun Liang if (speed) 32874a152de9SFuyun Liang *speed = hdev->hw.mac.speed; 32884a152de9SFuyun Liang if (duplex) 32894a152de9SFuyun Liang *duplex = hdev->hw.mac.duplex; 
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

/* Cache the link speed/duplex (as reported by the PF over mailbox elsewhere
 * in this file) in the VF's local MAC state.
 */
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

/* hnae3 hook: enable/disable hardware GRO for this VF.
 * Returns the status of the firmware command.
 */
static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

/* hnae3 hook: report cached media/module type. Either out-pointer may be
 * NULL when the caller is not interested in that field.
 */
static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				   u8 *module_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

/* hnae3 hook: true while the hardware reset-in-progress register reads
 * non-zero for this function.
 */
static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

/* hnae3 hook: true while the driver's reset service task is handling a
 * reset (software state bit, not the hardware register above).
 */
static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

/* hnae3 hook: number of hardware resets completed so far on this device. */
static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

/* hnae3 hook: copy out the cached supported/advertised link-mode masks. */
static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

/* Layout constants for the ethtool register dump: registers are emitted in
 * "lines" of REG_NUM_PER_LINE u32 words, and each register group is padded
 * out to a line boundary with SEPARATOR_VALUE marker words.
 */
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

/* ethtool get_regs_len: total dump size in bytes. Each group contributes
 * (group_size / REG_LEN_PER_LINE + 1) lines; the "+ 1" line absorbs the
 * separator padding written by hclgevf_get_regs() below, so the two
 * functions must stay in sync. Ring registers repeat per TQP and TQP
 * interrupt registers repeat per data-path vector (num_msi_used - 1, i.e.
 * excluding the misc vector).
 */
static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

/* ethtool get_regs: fill @data with the register values sized by
 * hclgevf_get_regs_len(). For every group, all registers are read and then
 * the current line is padded to a line boundary with SEPARATOR_VALUE words.
 */
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_um, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetching per-VF registers values from VF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	/* per-queue ring registers; 0x200 appears to be the register-space
	 * stride between consecutive TQPs - confirm against the HW spec.
	 */
	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* per-vector TQP interrupt registers, one set per data-path vector
	 * (misc vector excluded, hence num_msi_used - 1); 4-byte stride
	 * between vectors.
	 */
	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

/* Apply a port-based VLAN change pushed by the PF: bring the client down,
 * echo the new configuration back to the PF over the mailbox, update the
 * cached port_base_vlan_state, then bring the client back up. rtnl_lock is
 * taken around the client notifications only, not the mailbox exchange.
 */
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();

	/* send msg to PF and wait update port based vlan info */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan_info, data_size);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

/* VF hardware-abstraction operations table exposed to the hnae3 framework.
 * Every entry maps a framework hook to its hclgevf implementation; the
 * framework invokes these through the hnae3_handle associated with this VF.
 */
static const struct hnae3_ae_ops hclgevf_ops = {
	/* device and client lifecycle */
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	/* interrupt vector / queue management */
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	/* MAC address filtering */
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	/* statistics and ethtool strings */
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	/* RSS configuration */
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	/* misc device configuration */
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	/* reset handling */
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	/* channel / queue info */
	.set_channels = hclgevf_set_channels,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	/* ethtool register dump and link reporting */
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
};

/* Algorithm descriptor registered with hnae3: binds the ops table above to
 * the PCI device IDs this VF driver claims.
 */
static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void) 3507e2cb1decSSalil Mehta { 3508e2cb1decSSalil Mehta pr_info("%s is initializing\n", HCLGEVF_NAME); 3509e2cb1decSSalil Mehta 351016deaef2SYunsheng Lin hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); 35110ea68902SYunsheng Lin if (!hclgevf_wq) { 35120ea68902SYunsheng Lin pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 35130ea68902SYunsheng Lin return -ENOMEM; 35140ea68902SYunsheng Lin } 35150ea68902SYunsheng Lin 3516854cf33aSFuyun Liang hnae3_register_ae_algo(&ae_algovf); 3517854cf33aSFuyun Liang 3518854cf33aSFuyun Liang return 0; 3519e2cb1decSSalil Mehta } 3520e2cb1decSSalil Mehta 3521e2cb1decSSalil Mehta static void hclgevf_exit(void) 3522e2cb1decSSalil Mehta { 3523e2cb1decSSalil Mehta hnae3_unregister_ae_algo(&ae_algovf); 35240ea68902SYunsheng Lin destroy_workqueue(hclgevf_wq); 3525e2cb1decSSalil Mehta } 3526e2cb1decSSalil Mehta module_init(hclgevf_init); 3527e2cb1decSSalil Mehta module_exit(hclgevf_exit); 3528e2cb1decSSalil Mehta 3529e2cb1decSSalil Mehta MODULE_LICENSE("GPL"); 3530e2cb1decSSalil Mehta MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3531e2cb1decSSalil Mehta MODULE_DESCRIPTION("HCLGEVF Driver"); 3532e2cb1decSSalil Mehta MODULE_VERSION(HCLGEVF_MOD_VERSION); 3533