1e2cb1decSSalil Mehta // SPDX-License-Identifier: GPL-2.0+ 2e2cb1decSSalil Mehta // Copyright (c) 2016-2017 Hisilicon Limited. 3e2cb1decSSalil Mehta 4e2cb1decSSalil Mehta #include <linux/etherdevice.h> 5aa5c4f17SHuazhong Tan #include <linux/iopoll.h> 66988eb2aSSalil Mehta #include <net/rtnetlink.h> 7e2cb1decSSalil Mehta #include "hclgevf_cmd.h" 8e2cb1decSSalil Mehta #include "hclgevf_main.h" 9e2cb1decSSalil Mehta #include "hclge_mbx.h" 10e2cb1decSSalil Mehta #include "hnae3.h" 11e2cb1decSSalil Mehta 12e2cb1decSSalil Mehta #define HCLGEVF_NAME "hclgevf" 13e2cb1decSSalil Mehta 14bbe6540eSHuazhong Tan #define HCLGEVF_RESET_MAX_FAIL_CNT 5 15bbe6540eSHuazhong Tan 169c6f7085SHuazhong Tan static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 17e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf; 18e2cb1decSSalil Mehta 190ea68902SYunsheng Lin static struct workqueue_struct *hclgevf_wq; 200ea68902SYunsheng Lin 21e2cb1decSSalil Mehta static const struct pci_device_id ae_algovf_pci_tbl[] = { 22e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 23e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 24e2cb1decSSalil Mehta /* required last entry */ 25e2cb1decSSalil Mehta {0, } 26e2cb1decSSalil Mehta }; 27e2cb1decSSalil Mehta 28472d7eceSJian Shen static const u8 hclgevf_hash_key[] = { 29472d7eceSJian Shen 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 30472d7eceSJian Shen 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 31472d7eceSJian Shen 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 32472d7eceSJian Shen 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 33472d7eceSJian Shen 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 34472d7eceSJian Shen }; 35472d7eceSJian Shen 362f550a46SYunsheng Lin MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 372f550a46SYunsheng Lin 381600c3e5SJian Shen static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 391600c3e5SJian Shen HCLGEVF_CMDQ_TX_ADDR_H_REG, 401600c3e5SJian Shen 
HCLGEVF_CMDQ_TX_DEPTH_REG, 411600c3e5SJian Shen HCLGEVF_CMDQ_TX_TAIL_REG, 421600c3e5SJian Shen HCLGEVF_CMDQ_TX_HEAD_REG, 431600c3e5SJian Shen HCLGEVF_CMDQ_RX_ADDR_L_REG, 441600c3e5SJian Shen HCLGEVF_CMDQ_RX_ADDR_H_REG, 451600c3e5SJian Shen HCLGEVF_CMDQ_RX_DEPTH_REG, 461600c3e5SJian Shen HCLGEVF_CMDQ_RX_TAIL_REG, 471600c3e5SJian Shen HCLGEVF_CMDQ_RX_HEAD_REG, 481600c3e5SJian Shen HCLGEVF_VECTOR0_CMDQ_SRC_REG, 491600c3e5SJian Shen HCLGEVF_CMDQ_INTR_STS_REG, 501600c3e5SJian Shen HCLGEVF_CMDQ_INTR_EN_REG, 511600c3e5SJian Shen HCLGEVF_CMDQ_INTR_GEN_REG}; 521600c3e5SJian Shen 531600c3e5SJian Shen static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 541600c3e5SJian Shen HCLGEVF_RST_ING, 551600c3e5SJian Shen HCLGEVF_GRO_EN_REG}; 561600c3e5SJian Shen 571600c3e5SJian Shen static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 581600c3e5SJian Shen HCLGEVF_RING_RX_ADDR_H_REG, 591600c3e5SJian Shen HCLGEVF_RING_RX_BD_NUM_REG, 601600c3e5SJian Shen HCLGEVF_RING_RX_BD_LENGTH_REG, 611600c3e5SJian Shen HCLGEVF_RING_RX_MERGE_EN_REG, 621600c3e5SJian Shen HCLGEVF_RING_RX_TAIL_REG, 631600c3e5SJian Shen HCLGEVF_RING_RX_HEAD_REG, 641600c3e5SJian Shen HCLGEVF_RING_RX_FBD_NUM_REG, 651600c3e5SJian Shen HCLGEVF_RING_RX_OFFSET_REG, 661600c3e5SJian Shen HCLGEVF_RING_RX_FBD_OFFSET_REG, 671600c3e5SJian Shen HCLGEVF_RING_RX_STASH_REG, 681600c3e5SJian Shen HCLGEVF_RING_RX_BD_ERR_REG, 691600c3e5SJian Shen HCLGEVF_RING_TX_ADDR_L_REG, 701600c3e5SJian Shen HCLGEVF_RING_TX_ADDR_H_REG, 711600c3e5SJian Shen HCLGEVF_RING_TX_BD_NUM_REG, 721600c3e5SJian Shen HCLGEVF_RING_TX_PRIORITY_REG, 731600c3e5SJian Shen HCLGEVF_RING_TX_TC_REG, 741600c3e5SJian Shen HCLGEVF_RING_TX_MERGE_EN_REG, 751600c3e5SJian Shen HCLGEVF_RING_TX_TAIL_REG, 761600c3e5SJian Shen HCLGEVF_RING_TX_HEAD_REG, 771600c3e5SJian Shen HCLGEVF_RING_TX_FBD_NUM_REG, 781600c3e5SJian Shen HCLGEVF_RING_TX_OFFSET_REG, 791600c3e5SJian Shen HCLGEVF_RING_TX_EBD_NUM_REG, 801600c3e5SJian Shen HCLGEVF_RING_TX_EBD_OFFSET_REG, 
811600c3e5SJian Shen HCLGEVF_RING_TX_BD_ERR_REG, 821600c3e5SJian Shen HCLGEVF_RING_EN_REG}; 831600c3e5SJian Shen 841600c3e5SJian Shen static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 851600c3e5SJian Shen HCLGEVF_TQP_INTR_GL0_REG, 861600c3e5SJian Shen HCLGEVF_TQP_INTR_GL1_REG, 871600c3e5SJian Shen HCLGEVF_TQP_INTR_GL2_REG, 881600c3e5SJian Shen HCLGEVF_TQP_INTR_RL_REG}; 891600c3e5SJian Shen 909b2f3477SWeihang Li static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 91e2cb1decSSalil Mehta { 92eed9535fSPeng Li if (!handle->client) 93eed9535fSPeng Li return container_of(handle, struct hclgevf_dev, nic); 94eed9535fSPeng Li else if (handle->client->type == HNAE3_CLIENT_ROCE) 95eed9535fSPeng Li return container_of(handle, struct hclgevf_dev, roce); 96eed9535fSPeng Li else 97e2cb1decSSalil Mehta return container_of(handle, struct hclgevf_dev, nic); 98e2cb1decSSalil Mehta } 99e2cb1decSSalil Mehta 100e2cb1decSSalil Mehta static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 101e2cb1decSSalil Mehta { 102b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 103e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 104e2cb1decSSalil Mehta struct hclgevf_desc desc; 105e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 106e2cb1decSSalil Mehta int status; 107e2cb1decSSalil Mehta int i; 108e2cb1decSSalil Mehta 109b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 110b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 111e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, 112e2cb1decSSalil Mehta HCLGEVF_OPC_QUERY_RX_STATUS, 113e2cb1decSSalil Mehta true); 114e2cb1decSSalil Mehta 115e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 116e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 117e2cb1decSSalil Mehta if (status) { 118e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 119e2cb1decSSalil Mehta "Query tqp stat fail, 
status = %d,queue = %d\n", 120e2cb1decSSalil Mehta status, i); 121e2cb1decSSalil Mehta return status; 122e2cb1decSSalil Mehta } 123e2cb1decSSalil Mehta tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 124cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 125e2cb1decSSalil Mehta 126e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 127e2cb1decSSalil Mehta true); 128e2cb1decSSalil Mehta 129e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 130e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 131e2cb1decSSalil Mehta if (status) { 132e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 133e2cb1decSSalil Mehta "Query tqp stat fail, status = %d,queue = %d\n", 134e2cb1decSSalil Mehta status, i); 135e2cb1decSSalil Mehta return status; 136e2cb1decSSalil Mehta } 137e2cb1decSSalil Mehta tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 138cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 139e2cb1decSSalil Mehta } 140e2cb1decSSalil Mehta 141e2cb1decSSalil Mehta return 0; 142e2cb1decSSalil Mehta } 143e2cb1decSSalil Mehta 144e2cb1decSSalil Mehta static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 145e2cb1decSSalil Mehta { 146e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo = &handle->kinfo; 147e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 148e2cb1decSSalil Mehta u64 *buff = data; 149e2cb1decSSalil Mehta int i; 150e2cb1decSSalil Mehta 151b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 152b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 153e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 154e2cb1decSSalil Mehta } 155e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 156b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 157e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 158e2cb1decSSalil Mehta } 159e2cb1decSSalil Mehta 160e2cb1decSSalil Mehta return buff; 161e2cb1decSSalil Mehta } 
162e2cb1decSSalil Mehta 163e2cb1decSSalil Mehta static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 164e2cb1decSSalil Mehta { 165b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 166e2cb1decSSalil Mehta 167b4f1d303SJian Shen return kinfo->num_tqps * 2; 168e2cb1decSSalil Mehta } 169e2cb1decSSalil Mehta 170e2cb1decSSalil Mehta static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 171e2cb1decSSalil Mehta { 172b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 173e2cb1decSSalil Mehta u8 *buff = data; 174e2cb1decSSalil Mehta int i = 0; 175e2cb1decSSalil Mehta 176b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 177b4f1d303SJian Shen struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 178e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1790c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 180e2cb1decSSalil Mehta tqp->index); 181e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 182e2cb1decSSalil Mehta } 183e2cb1decSSalil Mehta 184b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 185b4f1d303SJian Shen struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 186e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1870c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 188e2cb1decSSalil Mehta tqp->index); 189e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 190e2cb1decSSalil Mehta } 191e2cb1decSSalil Mehta 192e2cb1decSSalil Mehta return buff; 193e2cb1decSSalil Mehta } 194e2cb1decSSalil Mehta 195e2cb1decSSalil Mehta static void hclgevf_update_stats(struct hnae3_handle *handle, 196e2cb1decSSalil Mehta struct net_device_stats *net_stats) 197e2cb1decSSalil Mehta { 198e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 199e2cb1decSSalil Mehta int status; 200e2cb1decSSalil Mehta 201e2cb1decSSalil Mehta status = hclgevf_tqps_update_stats(handle); 202e2cb1decSSalil Mehta if (status) 203e2cb1decSSalil Mehta 
dev_err(&hdev->pdev->dev, 204e2cb1decSSalil Mehta "VF update of TQPS stats fail, status = %d.\n", 205e2cb1decSSalil Mehta status); 206e2cb1decSSalil Mehta } 207e2cb1decSSalil Mehta 208e2cb1decSSalil Mehta static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 209e2cb1decSSalil Mehta { 210e2cb1decSSalil Mehta if (strset == ETH_SS_TEST) 211e2cb1decSSalil Mehta return -EOPNOTSUPP; 212e2cb1decSSalil Mehta else if (strset == ETH_SS_STATS) 213e2cb1decSSalil Mehta return hclgevf_tqps_get_sset_count(handle, strset); 214e2cb1decSSalil Mehta 215e2cb1decSSalil Mehta return 0; 216e2cb1decSSalil Mehta } 217e2cb1decSSalil Mehta 218e2cb1decSSalil Mehta static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 219e2cb1decSSalil Mehta u8 *data) 220e2cb1decSSalil Mehta { 221e2cb1decSSalil Mehta u8 *p = (char *)data; 222e2cb1decSSalil Mehta 223e2cb1decSSalil Mehta if (strset == ETH_SS_STATS) 224e2cb1decSSalil Mehta p = hclgevf_tqps_get_strings(handle, p); 225e2cb1decSSalil Mehta } 226e2cb1decSSalil Mehta 227e2cb1decSSalil Mehta static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 228e2cb1decSSalil Mehta { 229e2cb1decSSalil Mehta hclgevf_tqps_get_stats(handle, data); 230e2cb1decSSalil Mehta } 231e2cb1decSSalil Mehta 232d3410018SYufeng Mo static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, 233d3410018SYufeng Mo u8 subcode) 234d3410018SYufeng Mo { 235d3410018SYufeng Mo if (msg) { 236d3410018SYufeng Mo memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); 237d3410018SYufeng Mo msg->code = code; 238d3410018SYufeng Mo msg->subcode = subcode; 239d3410018SYufeng Mo } 240d3410018SYufeng Mo } 241d3410018SYufeng Mo 242e2cb1decSSalil Mehta static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 243e2cb1decSSalil Mehta { 244d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 245e2cb1decSSalil Mehta u8 resp_msg; 246e2cb1decSSalil Mehta int status; 247e2cb1decSSalil Mehta 248d3410018SYufeng Mo 
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0); 249d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 250d3410018SYufeng Mo sizeof(resp_msg)); 251e2cb1decSSalil Mehta if (status) { 252e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 253e2cb1decSSalil Mehta "VF request to get TC info from PF failed %d", 254e2cb1decSSalil Mehta status); 255e2cb1decSSalil Mehta return status; 256e2cb1decSSalil Mehta } 257e2cb1decSSalil Mehta 258e2cb1decSSalil Mehta hdev->hw_tc_map = resp_msg; 259e2cb1decSSalil Mehta 260e2cb1decSSalil Mehta return 0; 261e2cb1decSSalil Mehta } 262e2cb1decSSalil Mehta 26392f11ea1SJian Shen static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 26492f11ea1SJian Shen { 26592f11ea1SJian Shen struct hnae3_handle *nic = &hdev->nic; 266d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 26792f11ea1SJian Shen u8 resp_msg; 26892f11ea1SJian Shen int ret; 26992f11ea1SJian Shen 270d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 271d3410018SYufeng Mo HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); 272d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 273d3410018SYufeng Mo sizeof(u8)); 27492f11ea1SJian Shen if (ret) { 27592f11ea1SJian Shen dev_err(&hdev->pdev->dev, 27692f11ea1SJian Shen "VF request to get port based vlan state failed %d", 27792f11ea1SJian Shen ret); 27892f11ea1SJian Shen return ret; 27992f11ea1SJian Shen } 28092f11ea1SJian Shen 28192f11ea1SJian Shen nic->port_base_vlan_state = resp_msg; 28292f11ea1SJian Shen 28392f11ea1SJian Shen return 0; 28492f11ea1SJian Shen } 28592f11ea1SJian Shen 2866cee6fc3SJian Shen static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 287e2cb1decSSalil Mehta { 288c0425944SPeng Li #define HCLGEVF_TQPS_RSS_INFO_LEN 6 289d3410018SYufeng Mo #define HCLGEVF_TQPS_ALLOC_OFFSET 0 290d3410018SYufeng Mo #define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 291d3410018SYufeng Mo #define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 
292d3410018SYufeng Mo 293e2cb1decSSalil Mehta u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 294d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 295e2cb1decSSalil Mehta int status; 296e2cb1decSSalil Mehta 297d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); 298d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 299e2cb1decSSalil Mehta HCLGEVF_TQPS_RSS_INFO_LEN); 300e2cb1decSSalil Mehta if (status) { 301e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 302e2cb1decSSalil Mehta "VF request to get tqp info from PF failed %d", 303e2cb1decSSalil Mehta status); 304e2cb1decSSalil Mehta return status; 305e2cb1decSSalil Mehta } 306e2cb1decSSalil Mehta 307d3410018SYufeng Mo memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], 308d3410018SYufeng Mo sizeof(u16)); 309d3410018SYufeng Mo memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], 310d3410018SYufeng Mo sizeof(u16)); 311d3410018SYufeng Mo memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], 312d3410018SYufeng Mo sizeof(u16)); 313c0425944SPeng Li 314c0425944SPeng Li return 0; 315c0425944SPeng Li } 316c0425944SPeng Li 317c0425944SPeng Li static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 318c0425944SPeng Li { 319c0425944SPeng Li #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 320d3410018SYufeng Mo #define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 321d3410018SYufeng Mo #define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 322d3410018SYufeng Mo 323c0425944SPeng Li u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 324d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 325c0425944SPeng Li int ret; 326c0425944SPeng Li 327d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); 328d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 329c0425944SPeng Li HCLGEVF_TQPS_DEPTH_INFO_LEN); 330c0425944SPeng Li if (ret) { 331c0425944SPeng Li dev_err(&hdev->pdev->dev, 332c0425944SPeng Li "VF request to get tqp depth info 
from PF failed %d", 333c0425944SPeng Li ret); 334c0425944SPeng Li return ret; 335c0425944SPeng Li } 336c0425944SPeng Li 337d3410018SYufeng Mo memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], 338d3410018SYufeng Mo sizeof(u16)); 339d3410018SYufeng Mo memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], 340d3410018SYufeng Mo sizeof(u16)); 341e2cb1decSSalil Mehta 342e2cb1decSSalil Mehta return 0; 343e2cb1decSSalil Mehta } 344e2cb1decSSalil Mehta 3450c29d191Sliuzhongzhu static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 3460c29d191Sliuzhongzhu { 3470c29d191Sliuzhongzhu struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 348d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 3490c29d191Sliuzhongzhu u16 qid_in_pf = 0; 350d3410018SYufeng Mo u8 resp_data[2]; 3510c29d191Sliuzhongzhu int ret; 3520c29d191Sliuzhongzhu 353d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); 354d3410018SYufeng Mo memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 355d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, 35663cbf7a9SYufeng Mo sizeof(resp_data)); 3570c29d191Sliuzhongzhu if (!ret) 3580c29d191Sliuzhongzhu qid_in_pf = *(u16 *)resp_data; 3590c29d191Sliuzhongzhu 3600c29d191Sliuzhongzhu return qid_in_pf; 3610c29d191Sliuzhongzhu } 3620c29d191Sliuzhongzhu 3639c3e7130Sliuzhongzhu static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 3649c3e7130Sliuzhongzhu { 365d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 36688d10bd6SJian Shen u8 resp_msg[2]; 3679c3e7130Sliuzhongzhu int ret; 3689c3e7130Sliuzhongzhu 369d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); 370d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 371d3410018SYufeng Mo sizeof(resp_msg)); 3729c3e7130Sliuzhongzhu if (ret) { 3739c3e7130Sliuzhongzhu dev_err(&hdev->pdev->dev, 3749c3e7130Sliuzhongzhu "VF request to get the pf port media 
type failed %d", 3759c3e7130Sliuzhongzhu ret); 3769c3e7130Sliuzhongzhu return ret; 3779c3e7130Sliuzhongzhu } 3789c3e7130Sliuzhongzhu 37988d10bd6SJian Shen hdev->hw.mac.media_type = resp_msg[0]; 38088d10bd6SJian Shen hdev->hw.mac.module_type = resp_msg[1]; 3819c3e7130Sliuzhongzhu 3829c3e7130Sliuzhongzhu return 0; 3839c3e7130Sliuzhongzhu } 3849c3e7130Sliuzhongzhu 385e2cb1decSSalil Mehta static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 386e2cb1decSSalil Mehta { 387e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 388e2cb1decSSalil Mehta int i; 389e2cb1decSSalil Mehta 390e2cb1decSSalil Mehta hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 391e2cb1decSSalil Mehta sizeof(struct hclgevf_tqp), GFP_KERNEL); 392e2cb1decSSalil Mehta if (!hdev->htqp) 393e2cb1decSSalil Mehta return -ENOMEM; 394e2cb1decSSalil Mehta 395e2cb1decSSalil Mehta tqp = hdev->htqp; 396e2cb1decSSalil Mehta 397e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; i++) { 398e2cb1decSSalil Mehta tqp->dev = &hdev->pdev->dev; 399e2cb1decSSalil Mehta tqp->index = i; 400e2cb1decSSalil Mehta 401e2cb1decSSalil Mehta tqp->q.ae_algo = &ae_algovf; 402e2cb1decSSalil Mehta tqp->q.buf_size = hdev->rx_buf_len; 403c0425944SPeng Li tqp->q.tx_desc_num = hdev->num_tx_desc; 404c0425944SPeng Li tqp->q.rx_desc_num = hdev->num_rx_desc; 405e2cb1decSSalil Mehta tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 406e2cb1decSSalil Mehta i * HCLGEVF_TQP_REG_SIZE; 407e2cb1decSSalil Mehta 408e2cb1decSSalil Mehta tqp++; 409e2cb1decSSalil Mehta } 410e2cb1decSSalil Mehta 411e2cb1decSSalil Mehta return 0; 412e2cb1decSSalil Mehta } 413e2cb1decSSalil Mehta 414e2cb1decSSalil Mehta static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 415e2cb1decSSalil Mehta { 416e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 417e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo; 418e2cb1decSSalil Mehta u16 new_tqps = hdev->num_tqps; 419ebaf1908SWeihang Li unsigned int i; 420e2cb1decSSalil Mehta 421e2cb1decSSalil 
Mehta kinfo = &nic->kinfo; 422e2cb1decSSalil Mehta kinfo->num_tc = 0; 423c0425944SPeng Li kinfo->num_tx_desc = hdev->num_tx_desc; 424c0425944SPeng Li kinfo->num_rx_desc = hdev->num_rx_desc; 425e2cb1decSSalil Mehta kinfo->rx_buf_len = hdev->rx_buf_len; 426e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 427e2cb1decSSalil Mehta if (hdev->hw_tc_map & BIT(i)) 428e2cb1decSSalil Mehta kinfo->num_tc++; 429e2cb1decSSalil Mehta 430e2cb1decSSalil Mehta kinfo->rss_size 431e2cb1decSSalil Mehta = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 432e2cb1decSSalil Mehta new_tqps = kinfo->rss_size * kinfo->num_tc; 433e2cb1decSSalil Mehta kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 434e2cb1decSSalil Mehta 435e2cb1decSSalil Mehta kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 436e2cb1decSSalil Mehta sizeof(struct hnae3_queue *), GFP_KERNEL); 437e2cb1decSSalil Mehta if (!kinfo->tqp) 438e2cb1decSSalil Mehta return -ENOMEM; 439e2cb1decSSalil Mehta 440e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 441e2cb1decSSalil Mehta hdev->htqp[i].q.handle = &hdev->nic; 442e2cb1decSSalil Mehta hdev->htqp[i].q.tqp_index = i; 443e2cb1decSSalil Mehta kinfo->tqp[i] = &hdev->htqp[i].q; 444e2cb1decSSalil Mehta } 445e2cb1decSSalil Mehta 446580a05f9SYonglong Liu /* after init the max rss_size and tqps, adjust the default tqp numbers 447580a05f9SYonglong Liu * and rss size with the actual vector numbers 448580a05f9SYonglong Liu */ 449580a05f9SYonglong Liu kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 450580a05f9SYonglong Liu kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc, 451580a05f9SYonglong Liu kinfo->rss_size); 452580a05f9SYonglong Liu 453e2cb1decSSalil Mehta return 0; 454e2cb1decSSalil Mehta } 455e2cb1decSSalil Mehta 456e2cb1decSSalil Mehta static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 457e2cb1decSSalil Mehta { 458d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 459e2cb1decSSalil Mehta 
int status; 460e2cb1decSSalil Mehta 461d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); 462d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 463e2cb1decSSalil Mehta if (status) 464e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 465e2cb1decSSalil Mehta "VF failed to fetch link status(%d) from PF", status); 466e2cb1decSSalil Mehta } 467e2cb1decSSalil Mehta 468e2cb1decSSalil Mehta void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 469e2cb1decSSalil Mehta { 47045e92b7eSPeng Li struct hnae3_handle *rhandle = &hdev->roce; 471e2cb1decSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 47245e92b7eSPeng Li struct hnae3_client *rclient; 473e2cb1decSSalil Mehta struct hnae3_client *client; 474e2cb1decSSalil Mehta 475ff200099SYunsheng Lin if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 476ff200099SYunsheng Lin return; 477ff200099SYunsheng Lin 478e2cb1decSSalil Mehta client = handle->client; 47945e92b7eSPeng Li rclient = hdev->roce_client; 480e2cb1decSSalil Mehta 481582d37bbSPeng Li link_state = 482582d37bbSPeng Li test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; 483582d37bbSPeng Li 484e2cb1decSSalil Mehta if (link_state != hdev->hw.mac.link) { 485e2cb1decSSalil Mehta client->ops->link_status_change(handle, !!link_state); 48645e92b7eSPeng Li if (rclient && rclient->ops->link_status_change) 48745e92b7eSPeng Li rclient->ops->link_status_change(rhandle, !!link_state); 488e2cb1decSSalil Mehta hdev->hw.mac.link = link_state; 489e2cb1decSSalil Mehta } 490ff200099SYunsheng Lin 491ff200099SYunsheng Lin clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 492e2cb1decSSalil Mehta } 493e2cb1decSSalil Mehta 494538abaf3SYueHaibing static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 4959194d18bSliuzhongzhu { 4969194d18bSliuzhongzhu #define HCLGEVF_ADVERTISING 0 4979194d18bSliuzhongzhu #define HCLGEVF_SUPPORTED 1 4989194d18bSliuzhongzhu 499d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 500d3410018SYufeng Mo 501d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 502d3410018SYufeng Mo send_msg.data[0] = HCLGEVF_ADVERTISING; 503d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 504d3410018SYufeng Mo send_msg.data[0] = HCLGEVF_SUPPORTED; 505d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 5069194d18bSliuzhongzhu } 5079194d18bSliuzhongzhu 508e2cb1decSSalil Mehta static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 509e2cb1decSSalil Mehta { 510e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 511e2cb1decSSalil Mehta int ret; 512e2cb1decSSalil Mehta 513e2cb1decSSalil Mehta nic->ae_algo = &ae_algovf; 514e2cb1decSSalil Mehta nic->pdev = hdev->pdev; 515e2cb1decSSalil Mehta nic->numa_node_mask = hdev->numa_node_mask; 516424eb834SSalil Mehta nic->flags |= HNAE3_SUPPORT_VF; 517e2cb1decSSalil Mehta 518e2cb1decSSalil Mehta ret = hclgevf_knic_setup(hdev); 519e2cb1decSSalil Mehta if (ret) 520e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 521e2cb1decSSalil Mehta ret); 522e2cb1decSSalil Mehta return 
ret; 523e2cb1decSSalil Mehta } 524e2cb1decSSalil Mehta 525e2cb1decSSalil Mehta static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 526e2cb1decSSalil Mehta { 52736cbbdf6SPeng Li if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 52836cbbdf6SPeng Li dev_warn(&hdev->pdev->dev, 52936cbbdf6SPeng Li "vector(vector_id %d) has been freed.\n", vector_id); 53036cbbdf6SPeng Li return; 53136cbbdf6SPeng Li } 53236cbbdf6SPeng Li 533e2cb1decSSalil Mehta hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 534e2cb1decSSalil Mehta hdev->num_msi_left += 1; 535e2cb1decSSalil Mehta hdev->num_msi_used -= 1; 536e2cb1decSSalil Mehta } 537e2cb1decSSalil Mehta 538e2cb1decSSalil Mehta static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 539e2cb1decSSalil Mehta struct hnae3_vector_info *vector_info) 540e2cb1decSSalil Mehta { 541e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 542e2cb1decSSalil Mehta struct hnae3_vector_info *vector = vector_info; 543e2cb1decSSalil Mehta int alloc = 0; 544e2cb1decSSalil Mehta int i, j; 545e2cb1decSSalil Mehta 546580a05f9SYonglong Liu vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 547e2cb1decSSalil Mehta vector_num = min(hdev->num_msi_left, vector_num); 548e2cb1decSSalil Mehta 549e2cb1decSSalil Mehta for (j = 0; j < vector_num; j++) { 550e2cb1decSSalil Mehta for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 551e2cb1decSSalil Mehta if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 552e2cb1decSSalil Mehta vector->vector = pci_irq_vector(hdev->pdev, i); 553e2cb1decSSalil Mehta vector->io_addr = hdev->hw.io_base + 554e2cb1decSSalil Mehta HCLGEVF_VECTOR_REG_BASE + 555e2cb1decSSalil Mehta (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 556e2cb1decSSalil Mehta hdev->vector_status[i] = 0; 557e2cb1decSSalil Mehta hdev->vector_irq[i] = vector->vector; 558e2cb1decSSalil Mehta 559e2cb1decSSalil Mehta vector++; 560e2cb1decSSalil Mehta alloc++; 561e2cb1decSSalil Mehta 
562e2cb1decSSalil Mehta break; 563e2cb1decSSalil Mehta } 564e2cb1decSSalil Mehta } 565e2cb1decSSalil Mehta } 566e2cb1decSSalil Mehta hdev->num_msi_left -= alloc; 567e2cb1decSSalil Mehta hdev->num_msi_used += alloc; 568e2cb1decSSalil Mehta 569e2cb1decSSalil Mehta return alloc; 570e2cb1decSSalil Mehta } 571e2cb1decSSalil Mehta 572e2cb1decSSalil Mehta static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 573e2cb1decSSalil Mehta { 574e2cb1decSSalil Mehta int i; 575e2cb1decSSalil Mehta 576e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++) 577e2cb1decSSalil Mehta if (vector == hdev->vector_irq[i]) 578e2cb1decSSalil Mehta return i; 579e2cb1decSSalil Mehta 580e2cb1decSSalil Mehta return -EINVAL; 581e2cb1decSSalil Mehta } 582e2cb1decSSalil Mehta 583374ad291SJian Shen static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 584374ad291SJian Shen const u8 hfunc, const u8 *key) 585374ad291SJian Shen { 586374ad291SJian Shen struct hclgevf_rss_config_cmd *req; 587ebaf1908SWeihang Li unsigned int key_offset = 0; 588374ad291SJian Shen struct hclgevf_desc desc; 5893caf772bSYufeng Mo int key_counts; 590374ad291SJian Shen int key_size; 591374ad291SJian Shen int ret; 592374ad291SJian Shen 5933caf772bSYufeng Mo key_counts = HCLGEVF_RSS_KEY_SIZE; 594374ad291SJian Shen req = (struct hclgevf_rss_config_cmd *)desc.data; 595374ad291SJian Shen 5963caf772bSYufeng Mo while (key_counts) { 597374ad291SJian Shen hclgevf_cmd_setup_basic_desc(&desc, 598374ad291SJian Shen HCLGEVF_OPC_RSS_GENERIC_CONFIG, 599374ad291SJian Shen false); 600374ad291SJian Shen 601374ad291SJian Shen req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 602374ad291SJian Shen req->hash_config |= 603374ad291SJian Shen (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 604374ad291SJian Shen 6053caf772bSYufeng Mo key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); 606374ad291SJian Shen memcpy(req->hash_key, 607374ad291SJian Shen key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 
608374ad291SJian Shen 6093caf772bSYufeng Mo key_counts -= key_size; 6103caf772bSYufeng Mo key_offset++; 611374ad291SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 612374ad291SJian Shen if (ret) { 613374ad291SJian Shen dev_err(&hdev->pdev->dev, 614374ad291SJian Shen "Configure RSS config fail, status = %d\n", 615374ad291SJian Shen ret); 616374ad291SJian Shen return ret; 617374ad291SJian Shen } 618374ad291SJian Shen } 619374ad291SJian Shen 620374ad291SJian Shen return 0; 621374ad291SJian Shen } 622374ad291SJian Shen 623e2cb1decSSalil Mehta static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 624e2cb1decSSalil Mehta { 625e2cb1decSSalil Mehta return HCLGEVF_RSS_KEY_SIZE; 626e2cb1decSSalil Mehta } 627e2cb1decSSalil Mehta 628e2cb1decSSalil Mehta static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 629e2cb1decSSalil Mehta { 630e2cb1decSSalil Mehta return HCLGEVF_RSS_IND_TBL_SIZE; 631e2cb1decSSalil Mehta } 632e2cb1decSSalil Mehta 633e2cb1decSSalil Mehta static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 634e2cb1decSSalil Mehta { 635e2cb1decSSalil Mehta const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 636e2cb1decSSalil Mehta struct hclgevf_rss_indirection_table_cmd *req; 637e2cb1decSSalil Mehta struct hclgevf_desc desc; 638e2cb1decSSalil Mehta int status; 639e2cb1decSSalil Mehta int i, j; 640e2cb1decSSalil Mehta 641e2cb1decSSalil Mehta req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 642e2cb1decSSalil Mehta 643e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 644e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 645e2cb1decSSalil Mehta false); 646e2cb1decSSalil Mehta req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 647e2cb1decSSalil Mehta req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 648e2cb1decSSalil Mehta for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 649e2cb1decSSalil Mehta req->rss_result[j] = 650e2cb1decSSalil Mehta indir[i * 
HCLGEVF_RSS_CFG_TBL_SIZE + j]; 651e2cb1decSSalil Mehta 652e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 653e2cb1decSSalil Mehta if (status) { 654e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 655e2cb1decSSalil Mehta "VF failed(=%d) to set RSS indirection table\n", 656e2cb1decSSalil Mehta status); 657e2cb1decSSalil Mehta return status; 658e2cb1decSSalil Mehta } 659e2cb1decSSalil Mehta } 660e2cb1decSSalil Mehta 661e2cb1decSSalil Mehta return 0; 662e2cb1decSSalil Mehta } 663e2cb1decSSalil Mehta 664e2cb1decSSalil Mehta static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 665e2cb1decSSalil Mehta { 666e2cb1decSSalil Mehta struct hclgevf_rss_tc_mode_cmd *req; 667e2cb1decSSalil Mehta u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 668e2cb1decSSalil Mehta u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 669e2cb1decSSalil Mehta u16 tc_size[HCLGEVF_MAX_TC_NUM]; 670e2cb1decSSalil Mehta struct hclgevf_desc desc; 671e2cb1decSSalil Mehta u16 roundup_size; 672e2cb1decSSalil Mehta int status; 673ebaf1908SWeihang Li unsigned int i; 674e2cb1decSSalil Mehta 675e2cb1decSSalil Mehta req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 676e2cb1decSSalil Mehta 677e2cb1decSSalil Mehta roundup_size = roundup_pow_of_two(rss_size); 678e2cb1decSSalil Mehta roundup_size = ilog2(roundup_size); 679e2cb1decSSalil Mehta 680e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 681e2cb1decSSalil Mehta tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 682e2cb1decSSalil Mehta tc_size[i] = roundup_size; 683e2cb1decSSalil Mehta tc_offset[i] = rss_size * i; 684e2cb1decSSalil Mehta } 685e2cb1decSSalil Mehta 686e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 687e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 688e4e87715SPeng Li hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 689e2cb1decSSalil Mehta (tc_valid[i] & 0x1)); 690e4e87715SPeng Li hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 691e2cb1decSSalil 
Mehta HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 692e4e87715SPeng Li hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 693e2cb1decSSalil Mehta HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 694e2cb1decSSalil Mehta } 695e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 696e2cb1decSSalil Mehta if (status) 697e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 698e2cb1decSSalil Mehta "VF failed(=%d) to set rss tc mode\n", status); 699e2cb1decSSalil Mehta 700e2cb1decSSalil Mehta return status; 701e2cb1decSSalil Mehta } 702e2cb1decSSalil Mehta 703a638b1d8SJian Shen /* for revision 0x20, vf shared the same rss config with pf */ 704a638b1d8SJian Shen static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 705a638b1d8SJian Shen { 706a638b1d8SJian Shen #define HCLGEVF_RSS_MBX_RESP_LEN 8 707a638b1d8SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 708a638b1d8SJian Shen u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 709d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 710a638b1d8SJian Shen u16 msg_num, hash_key_index; 711a638b1d8SJian Shen u8 index; 712a638b1d8SJian Shen int ret; 713a638b1d8SJian Shen 714d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 715a638b1d8SJian Shen msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 716a638b1d8SJian Shen HCLGEVF_RSS_MBX_RESP_LEN; 717a638b1d8SJian Shen for (index = 0; index < msg_num; index++) { 718d3410018SYufeng Mo send_msg.data[0] = index; 719d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 720a638b1d8SJian Shen HCLGEVF_RSS_MBX_RESP_LEN); 721a638b1d8SJian Shen if (ret) { 722a638b1d8SJian Shen dev_err(&hdev->pdev->dev, 723a638b1d8SJian Shen "VF get rss hash key from PF failed, ret=%d", 724a638b1d8SJian Shen ret); 725a638b1d8SJian Shen return ret; 726a638b1d8SJian Shen } 727a638b1d8SJian Shen 728a638b1d8SJian Shen hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 729a638b1d8SJian Shen if (index == msg_num - 1) 730a638b1d8SJian Shen 
memcpy(&rss_cfg->rss_hash_key[hash_key_index], 731a638b1d8SJian Shen &resp_msg[0], 732a638b1d8SJian Shen HCLGEVF_RSS_KEY_SIZE - hash_key_index); 733a638b1d8SJian Shen else 734a638b1d8SJian Shen memcpy(&rss_cfg->rss_hash_key[hash_key_index], 735a638b1d8SJian Shen &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 736a638b1d8SJian Shen } 737a638b1d8SJian Shen 738a638b1d8SJian Shen return 0; 739a638b1d8SJian Shen } 740a638b1d8SJian Shen 741e2cb1decSSalil Mehta static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 742e2cb1decSSalil Mehta u8 *hfunc) 743e2cb1decSSalil Mehta { 744e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 745e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 746a638b1d8SJian Shen int i, ret; 747e2cb1decSSalil Mehta 748374ad291SJian Shen if (handle->pdev->revision >= 0x21) { 749374ad291SJian Shen /* Get hash algorithm */ 750374ad291SJian Shen if (hfunc) { 751374ad291SJian Shen switch (rss_cfg->hash_algo) { 752374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 753374ad291SJian Shen *hfunc = ETH_RSS_HASH_TOP; 754374ad291SJian Shen break; 755374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 756374ad291SJian Shen *hfunc = ETH_RSS_HASH_XOR; 757374ad291SJian Shen break; 758374ad291SJian Shen default: 759374ad291SJian Shen *hfunc = ETH_RSS_HASH_UNKNOWN; 760374ad291SJian Shen break; 761374ad291SJian Shen } 762374ad291SJian Shen } 763374ad291SJian Shen 764374ad291SJian Shen /* Get the RSS Key required by the user */ 765374ad291SJian Shen if (key) 766374ad291SJian Shen memcpy(key, rss_cfg->rss_hash_key, 767374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 768a638b1d8SJian Shen } else { 769a638b1d8SJian Shen if (hfunc) 770a638b1d8SJian Shen *hfunc = ETH_RSS_HASH_TOP; 771a638b1d8SJian Shen if (key) { 772a638b1d8SJian Shen ret = hclgevf_get_rss_hash_key(hdev); 773a638b1d8SJian Shen if (ret) 774a638b1d8SJian Shen return ret; 775a638b1d8SJian Shen memcpy(key, rss_cfg->rss_hash_key, 776a638b1d8SJian Shen 
HCLGEVF_RSS_KEY_SIZE); 777a638b1d8SJian Shen } 778374ad291SJian Shen } 779374ad291SJian Shen 780e2cb1decSSalil Mehta if (indir) 781e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 782e2cb1decSSalil Mehta indir[i] = rss_cfg->rss_indirection_tbl[i]; 783e2cb1decSSalil Mehta 784374ad291SJian Shen return 0; 785e2cb1decSSalil Mehta } 786e2cb1decSSalil Mehta 787e2cb1decSSalil Mehta static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 788e2cb1decSSalil Mehta const u8 *key, const u8 hfunc) 789e2cb1decSSalil Mehta { 790e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 791e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 792374ad291SJian Shen int ret, i; 793374ad291SJian Shen 794374ad291SJian Shen if (handle->pdev->revision >= 0x21) { 795374ad291SJian Shen /* Set the RSS Hash Key if specififed by the user */ 796374ad291SJian Shen if (key) { 797374ad291SJian Shen switch (hfunc) { 798374ad291SJian Shen case ETH_RSS_HASH_TOP: 799374ad291SJian Shen rss_cfg->hash_algo = 800374ad291SJian Shen HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 801374ad291SJian Shen break; 802374ad291SJian Shen case ETH_RSS_HASH_XOR: 803374ad291SJian Shen rss_cfg->hash_algo = 804374ad291SJian Shen HCLGEVF_RSS_HASH_ALGO_SIMPLE; 805374ad291SJian Shen break; 806374ad291SJian Shen case ETH_RSS_HASH_NO_CHANGE: 807374ad291SJian Shen break; 808374ad291SJian Shen default: 809374ad291SJian Shen return -EINVAL; 810374ad291SJian Shen } 811374ad291SJian Shen 812374ad291SJian Shen ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 813374ad291SJian Shen key); 814374ad291SJian Shen if (ret) 815374ad291SJian Shen return ret; 816374ad291SJian Shen 817374ad291SJian Shen /* Update the shadow RSS key with user specified qids */ 818374ad291SJian Shen memcpy(rss_cfg->rss_hash_key, key, 819374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 820374ad291SJian Shen } 821374ad291SJian Shen } 822e2cb1decSSalil Mehta 823e2cb1decSSalil Mehta /* update the shadow RSS 
table with user specified qids */ 824e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 825e2cb1decSSalil Mehta rss_cfg->rss_indirection_tbl[i] = indir[i]; 826e2cb1decSSalil Mehta 827e2cb1decSSalil Mehta /* update the hardware */ 828e2cb1decSSalil Mehta return hclgevf_set_rss_indir_table(hdev); 829e2cb1decSSalil Mehta } 830e2cb1decSSalil Mehta 831d97b3072SJian Shen static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 832d97b3072SJian Shen { 833d97b3072SJian Shen u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 834d97b3072SJian Shen 835d97b3072SJian Shen if (nfc->data & RXH_L4_B_2_3) 836d97b3072SJian Shen hash_sets |= HCLGEVF_D_PORT_BIT; 837d97b3072SJian Shen else 838d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_PORT_BIT; 839d97b3072SJian Shen 840d97b3072SJian Shen if (nfc->data & RXH_IP_SRC) 841d97b3072SJian Shen hash_sets |= HCLGEVF_S_IP_BIT; 842d97b3072SJian Shen else 843d97b3072SJian Shen hash_sets &= ~HCLGEVF_S_IP_BIT; 844d97b3072SJian Shen 845d97b3072SJian Shen if (nfc->data & RXH_IP_DST) 846d97b3072SJian Shen hash_sets |= HCLGEVF_D_IP_BIT; 847d97b3072SJian Shen else 848d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_IP_BIT; 849d97b3072SJian Shen 850d97b3072SJian Shen if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 851d97b3072SJian Shen hash_sets |= HCLGEVF_V_TAG_BIT; 852d97b3072SJian Shen 853d97b3072SJian Shen return hash_sets; 854d97b3072SJian Shen } 855d97b3072SJian Shen 856d97b3072SJian Shen static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 857d97b3072SJian Shen struct ethtool_rxnfc *nfc) 858d97b3072SJian Shen { 859d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 860d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 861d97b3072SJian Shen struct hclgevf_rss_input_tuple_cmd *req; 862d97b3072SJian Shen struct hclgevf_desc desc; 863d97b3072SJian Shen u8 tuple_sets; 864d97b3072SJian Shen int ret; 865d97b3072SJian Shen 866d97b3072SJian Shen if 
(handle->pdev->revision == 0x20) 867d97b3072SJian Shen return -EOPNOTSUPP; 868d97b3072SJian Shen 869d97b3072SJian Shen if (nfc->data & 870d97b3072SJian Shen ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 871d97b3072SJian Shen return -EINVAL; 872d97b3072SJian Shen 873d97b3072SJian Shen req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 874d97b3072SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 875d97b3072SJian Shen 876d97b3072SJian Shen req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 877d97b3072SJian Shen req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 878d97b3072SJian Shen req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 879d97b3072SJian Shen req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 880d97b3072SJian Shen req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 881d97b3072SJian Shen req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 882d97b3072SJian Shen req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 883d97b3072SJian Shen req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 884d97b3072SJian Shen 885d97b3072SJian Shen tuple_sets = hclgevf_get_rss_hash_bits(nfc); 886d97b3072SJian Shen switch (nfc->flow_type) { 887d97b3072SJian Shen case TCP_V4_FLOW: 888d97b3072SJian Shen req->ipv4_tcp_en = tuple_sets; 889d97b3072SJian Shen break; 890d97b3072SJian Shen case TCP_V6_FLOW: 891d97b3072SJian Shen req->ipv6_tcp_en = tuple_sets; 892d97b3072SJian Shen break; 893d97b3072SJian Shen case UDP_V4_FLOW: 894d97b3072SJian Shen req->ipv4_udp_en = tuple_sets; 895d97b3072SJian Shen break; 896d97b3072SJian Shen case UDP_V6_FLOW: 897d97b3072SJian Shen req->ipv6_udp_en = tuple_sets; 898d97b3072SJian Shen break; 899d97b3072SJian Shen case SCTP_V4_FLOW: 900d97b3072SJian Shen req->ipv4_sctp_en = tuple_sets; 901d97b3072SJian Shen break; 902d97b3072SJian Shen case SCTP_V6_FLOW: 903d97b3072SJian Shen if ((nfc->data & RXH_L4_B_0_1) || 904d97b3072SJian Shen (nfc->data & 
RXH_L4_B_2_3)) 905d97b3072SJian Shen return -EINVAL; 906d97b3072SJian Shen 907d97b3072SJian Shen req->ipv6_sctp_en = tuple_sets; 908d97b3072SJian Shen break; 909d97b3072SJian Shen case IPV4_FLOW: 910d97b3072SJian Shen req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 911d97b3072SJian Shen break; 912d97b3072SJian Shen case IPV6_FLOW: 913d97b3072SJian Shen req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 914d97b3072SJian Shen break; 915d97b3072SJian Shen default: 916d97b3072SJian Shen return -EINVAL; 917d97b3072SJian Shen } 918d97b3072SJian Shen 919d97b3072SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 920d97b3072SJian Shen if (ret) { 921d97b3072SJian Shen dev_err(&hdev->pdev->dev, 922d97b3072SJian Shen "Set rss tuple fail, status = %d\n", ret); 923d97b3072SJian Shen return ret; 924d97b3072SJian Shen } 925d97b3072SJian Shen 926d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 927d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 928d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 929d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 930d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 931d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 932d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 933d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 934d97b3072SJian Shen return 0; 935d97b3072SJian Shen } 936d97b3072SJian Shen 937d97b3072SJian Shen static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 938d97b3072SJian Shen struct ethtool_rxnfc *nfc) 939d97b3072SJian Shen { 940d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 941d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 942d97b3072SJian Shen u8 tuple_sets; 943d97b3072SJian Shen 944d97b3072SJian Shen if (handle->pdev->revision == 
0x20) 945d97b3072SJian Shen return -EOPNOTSUPP; 946d97b3072SJian Shen 947d97b3072SJian Shen nfc->data = 0; 948d97b3072SJian Shen 949d97b3072SJian Shen switch (nfc->flow_type) { 950d97b3072SJian Shen case TCP_V4_FLOW: 951d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 952d97b3072SJian Shen break; 953d97b3072SJian Shen case UDP_V4_FLOW: 954d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 955d97b3072SJian Shen break; 956d97b3072SJian Shen case TCP_V6_FLOW: 957d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 958d97b3072SJian Shen break; 959d97b3072SJian Shen case UDP_V6_FLOW: 960d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 961d97b3072SJian Shen break; 962d97b3072SJian Shen case SCTP_V4_FLOW: 963d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 964d97b3072SJian Shen break; 965d97b3072SJian Shen case SCTP_V6_FLOW: 966d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 967d97b3072SJian Shen break; 968d97b3072SJian Shen case IPV4_FLOW: 969d97b3072SJian Shen case IPV6_FLOW: 970d97b3072SJian Shen tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 971d97b3072SJian Shen break; 972d97b3072SJian Shen default: 973d97b3072SJian Shen return -EINVAL; 974d97b3072SJian Shen } 975d97b3072SJian Shen 976d97b3072SJian Shen if (!tuple_sets) 977d97b3072SJian Shen return 0; 978d97b3072SJian Shen 979d97b3072SJian Shen if (tuple_sets & HCLGEVF_D_PORT_BIT) 980d97b3072SJian Shen nfc->data |= RXH_L4_B_2_3; 981d97b3072SJian Shen if (tuple_sets & HCLGEVF_S_PORT_BIT) 982d97b3072SJian Shen nfc->data |= RXH_L4_B_0_1; 983d97b3072SJian Shen if (tuple_sets & HCLGEVF_D_IP_BIT) 984d97b3072SJian Shen nfc->data |= RXH_IP_DST; 985d97b3072SJian Shen if (tuple_sets & HCLGEVF_S_IP_BIT) 986d97b3072SJian Shen nfc->data |= RXH_IP_SRC; 987d97b3072SJian Shen 988d97b3072SJian Shen return 0; 989d97b3072SJian Shen } 990d97b3072SJian Shen 991d97b3072SJian Shen static int 
hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 992d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg) 993d97b3072SJian Shen { 994d97b3072SJian Shen struct hclgevf_rss_input_tuple_cmd *req; 995d97b3072SJian Shen struct hclgevf_desc desc; 996d97b3072SJian Shen int ret; 997d97b3072SJian Shen 998d97b3072SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 999d97b3072SJian Shen 1000d97b3072SJian Shen req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 1001d97b3072SJian Shen 1002d97b3072SJian Shen req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 1003d97b3072SJian Shen req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 1004d97b3072SJian Shen req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 1005d97b3072SJian Shen req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 1006d97b3072SJian Shen req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 1007d97b3072SJian Shen req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 1008d97b3072SJian Shen req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 1009d97b3072SJian Shen req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 1010d97b3072SJian Shen 1011d97b3072SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1012d97b3072SJian Shen if (ret) 1013d97b3072SJian Shen dev_err(&hdev->pdev->dev, 1014d97b3072SJian Shen "Configure rss input fail, status = %d\n", ret); 1015d97b3072SJian Shen return ret; 1016d97b3072SJian Shen } 1017d97b3072SJian Shen 1018e2cb1decSSalil Mehta static int hclgevf_get_tc_size(struct hnae3_handle *handle) 1019e2cb1decSSalil Mehta { 1020e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1021e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1022e2cb1decSSalil Mehta 1023e2cb1decSSalil Mehta return rss_cfg->rss_size; 1024e2cb1decSSalil Mehta } 1025e2cb1decSSalil Mehta 1026e2cb1decSSalil Mehta static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 
1027b204bc74SPeng Li int vector_id, 1028e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1029e2cb1decSSalil Mehta { 1030e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1031d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1032e2cb1decSSalil Mehta struct hnae3_ring_chain_node *node; 1033e2cb1decSSalil Mehta int status; 1034d3410018SYufeng Mo int i = 0; 1035e2cb1decSSalil Mehta 1036d3410018SYufeng Mo memset(&send_msg, 0, sizeof(send_msg)); 1037d3410018SYufeng Mo send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 1038c09ba484SPeng Li HCLGE_MBX_UNMAP_RING_TO_VECTOR; 1039d3410018SYufeng Mo send_msg.vector_id = vector_id; 1040e2cb1decSSalil Mehta 1041e2cb1decSSalil Mehta for (node = ring_chain; node; node = node->next) { 1042d3410018SYufeng Mo send_msg.param[i].ring_type = 1043e4e87715SPeng Li hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 1044d3410018SYufeng Mo 1045d3410018SYufeng Mo send_msg.param[i].tqp_index = node->tqp_index; 1046d3410018SYufeng Mo send_msg.param[i].int_gl_index = 1047d3410018SYufeng Mo hnae3_get_field(node->int_gl_idx, 104879eee410SFuyun Liang HNAE3_RING_GL_IDX_M, 104979eee410SFuyun Liang HNAE3_RING_GL_IDX_S); 105079eee410SFuyun Liang 10515d02a58dSYunsheng Lin i++; 1052d3410018SYufeng Mo if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 1053d3410018SYufeng Mo send_msg.ring_num = i; 1054e2cb1decSSalil Mehta 1055d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 1056d3410018SYufeng Mo NULL, 0); 1057e2cb1decSSalil Mehta if (status) { 1058e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1059e2cb1decSSalil Mehta "Map TQP fail, status is %d.\n", 1060e2cb1decSSalil Mehta status); 1061e2cb1decSSalil Mehta return status; 1062e2cb1decSSalil Mehta } 1063e2cb1decSSalil Mehta i = 0; 1064e2cb1decSSalil Mehta } 1065e2cb1decSSalil Mehta } 1066e2cb1decSSalil Mehta 1067e2cb1decSSalil Mehta return 0; 1068e2cb1decSSalil Mehta } 1069e2cb1decSSalil Mehta 1070e2cb1decSSalil Mehta static int 
hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1071e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1072e2cb1decSSalil Mehta { 1073b204bc74SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1074b204bc74SPeng Li int vector_id; 1075b204bc74SPeng Li 1076b204bc74SPeng Li vector_id = hclgevf_get_vector_index(hdev, vector); 1077b204bc74SPeng Li if (vector_id < 0) { 1078b204bc74SPeng Li dev_err(&handle->pdev->dev, 1079b204bc74SPeng Li "Get vector index fail. ret =%d\n", vector_id); 1080b204bc74SPeng Li return vector_id; 1081b204bc74SPeng Li } 1082b204bc74SPeng Li 1083b204bc74SPeng Li return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1084e2cb1decSSalil Mehta } 1085e2cb1decSSalil Mehta 1086e2cb1decSSalil Mehta static int hclgevf_unmap_ring_from_vector( 1087e2cb1decSSalil Mehta struct hnae3_handle *handle, 1088e2cb1decSSalil Mehta int vector, 1089e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1090e2cb1decSSalil Mehta { 1091e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1092e2cb1decSSalil Mehta int ret, vector_id; 1093e2cb1decSSalil Mehta 1094dea846e8SHuazhong Tan if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1095dea846e8SHuazhong Tan return 0; 1096dea846e8SHuazhong Tan 1097e2cb1decSSalil Mehta vector_id = hclgevf_get_vector_index(hdev, vector); 1098e2cb1decSSalil Mehta if (vector_id < 0) { 1099e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 1100e2cb1decSSalil Mehta "Get vector index fail. ret =%d\n", vector_id); 1101e2cb1decSSalil Mehta return vector_id; 1102e2cb1decSSalil Mehta } 1103e2cb1decSSalil Mehta 1104b204bc74SPeng Li ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 11050d3e6631SYunsheng Lin if (ret) 1106e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 1107e2cb1decSSalil Mehta "Unmap ring from vector fail. 
vector=%d, ret =%d\n", 1108e2cb1decSSalil Mehta vector_id, 1109e2cb1decSSalil Mehta ret); 11100d3e6631SYunsheng Lin 1111e2cb1decSSalil Mehta return ret; 1112e2cb1decSSalil Mehta } 1113e2cb1decSSalil Mehta 11140d3e6631SYunsheng Lin static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 11150d3e6631SYunsheng Lin { 11160d3e6631SYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 111703718db9SYunsheng Lin int vector_id; 11180d3e6631SYunsheng Lin 111903718db9SYunsheng Lin vector_id = hclgevf_get_vector_index(hdev, vector); 112003718db9SYunsheng Lin if (vector_id < 0) { 112103718db9SYunsheng Lin dev_err(&handle->pdev->dev, 112203718db9SYunsheng Lin "hclgevf_put_vector get vector index fail. ret =%d\n", 112303718db9SYunsheng Lin vector_id); 112403718db9SYunsheng Lin return vector_id; 112503718db9SYunsheng Lin } 112603718db9SYunsheng Lin 112703718db9SYunsheng Lin hclgevf_free_vector(hdev, vector_id); 1128e2cb1decSSalil Mehta 1129e2cb1decSSalil Mehta return 0; 1130e2cb1decSSalil Mehta } 1131e2cb1decSSalil Mehta 11323b75c3dfSPeng Li static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1133e196ec75SJian Shen bool en_uc_pmc, bool en_mc_pmc, 1134f01f5559SJian Shen bool en_bc_pmc) 1135e2cb1decSSalil Mehta { 1136d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1137f01f5559SJian Shen int ret; 1138e2cb1decSSalil Mehta 1139d3410018SYufeng Mo memset(&send_msg, 0, sizeof(send_msg)); 1140d3410018SYufeng Mo send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 1141d3410018SYufeng Mo send_msg.en_bc = en_bc_pmc ? 1 : 0; 1142d3410018SYufeng Mo send_msg.en_uc = en_uc_pmc ? 1 : 0; 1143d3410018SYufeng Mo send_msg.en_mc = en_mc_pmc ? 
1 : 0; 1144e2cb1decSSalil Mehta 1145d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1146d3410018SYufeng Mo 1147f01f5559SJian Shen if (ret) 1148e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1149f01f5559SJian Shen "Set promisc mode fail, status is %d.\n", ret); 1150e2cb1decSSalil Mehta 1151f01f5559SJian Shen return ret; 1152e2cb1decSSalil Mehta } 1153e2cb1decSSalil Mehta 1154e196ec75SJian Shen static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 1155e196ec75SJian Shen bool en_mc_pmc) 1156e2cb1decSSalil Mehta { 1157e196ec75SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1158e196ec75SJian Shen struct pci_dev *pdev = hdev->pdev; 1159e196ec75SJian Shen bool en_bc_pmc; 1160e196ec75SJian Shen 1161e196ec75SJian Shen en_bc_pmc = pdev->revision != 0x20; 1162e196ec75SJian Shen 1163e196ec75SJian Shen return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 1164e196ec75SJian Shen en_bc_pmc); 1165e2cb1decSSalil Mehta } 1166e2cb1decSSalil Mehta 1167c631c696SJian Shen static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 1168c631c696SJian Shen { 1169c631c696SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1170c631c696SJian Shen 1171c631c696SJian Shen set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1172c631c696SJian Shen } 1173c631c696SJian Shen 1174c631c696SJian Shen static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 1175c631c696SJian Shen { 1176c631c696SJian Shen struct hnae3_handle *handle = &hdev->nic; 1177c631c696SJian Shen bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 1178c631c696SJian Shen bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 1179c631c696SJian Shen int ret; 1180c631c696SJian Shen 1181c631c696SJian Shen if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 1182c631c696SJian Shen ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 1183c631c696SJian Shen if (!ret) 1184c631c696SJian Shen 
clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1185c631c696SJian Shen } 1186c631c696SJian Shen } 1187c631c696SJian Shen 1188ebaf1908SWeihang Li static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, 1189e2cb1decSSalil Mehta int stream_id, bool enable) 1190e2cb1decSSalil Mehta { 1191e2cb1decSSalil Mehta struct hclgevf_cfg_com_tqp_queue_cmd *req; 1192e2cb1decSSalil Mehta struct hclgevf_desc desc; 1193e2cb1decSSalil Mehta int status; 1194e2cb1decSSalil Mehta 1195e2cb1decSSalil Mehta req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1196e2cb1decSSalil Mehta 1197e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1198e2cb1decSSalil Mehta false); 1199e2cb1decSSalil Mehta req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1200e2cb1decSSalil Mehta req->stream_id = cpu_to_le16(stream_id); 1201ebaf1908SWeihang Li if (enable) 1202ebaf1908SWeihang Li req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 1203e2cb1decSSalil Mehta 1204e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1205e2cb1decSSalil Mehta if (status) 1206e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1207e2cb1decSSalil Mehta "TQP enable fail, status =%d.\n", status); 1208e2cb1decSSalil Mehta 1209e2cb1decSSalil Mehta return status; 1210e2cb1decSSalil Mehta } 1211e2cb1decSSalil Mehta 1212e2cb1decSSalil Mehta static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1213e2cb1decSSalil Mehta { 1214b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1215e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 1216e2cb1decSSalil Mehta int i; 1217e2cb1decSSalil Mehta 1218b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 1219b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1220e2cb1decSSalil Mehta memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1221e2cb1decSSalil Mehta } 1222e2cb1decSSalil Mehta } 1223e2cb1decSSalil Mehta 12248e6de441SHuazhong Tan static int 
hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 12258e6de441SHuazhong Tan { 1226d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 12278e6de441SHuazhong Tan u8 host_mac[ETH_ALEN]; 12288e6de441SHuazhong Tan int status; 12298e6de441SHuazhong Tan 1230d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 1231d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 1232d3410018SYufeng Mo ETH_ALEN); 12338e6de441SHuazhong Tan if (status) { 12348e6de441SHuazhong Tan dev_err(&hdev->pdev->dev, 12358e6de441SHuazhong Tan "fail to get VF MAC from host %d", status); 12368e6de441SHuazhong Tan return status; 12378e6de441SHuazhong Tan } 12388e6de441SHuazhong Tan 12398e6de441SHuazhong Tan ether_addr_copy(p, host_mac); 12408e6de441SHuazhong Tan 12418e6de441SHuazhong Tan return 0; 12428e6de441SHuazhong Tan } 12438e6de441SHuazhong Tan 1244e2cb1decSSalil Mehta static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1245e2cb1decSSalil Mehta { 1246e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 12478e6de441SHuazhong Tan u8 host_mac_addr[ETH_ALEN]; 1248e2cb1decSSalil Mehta 12498e6de441SHuazhong Tan if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 12508e6de441SHuazhong Tan return; 12518e6de441SHuazhong Tan 12528e6de441SHuazhong Tan hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 12538e6de441SHuazhong Tan if (hdev->has_pf_mac) 12548e6de441SHuazhong Tan ether_addr_copy(p, host_mac_addr); 12558e6de441SHuazhong Tan else 1256e2cb1decSSalil Mehta ether_addr_copy(p, hdev->hw.mac.mac_addr); 1257e2cb1decSSalil Mehta } 1258e2cb1decSSalil Mehta 125959098055SFuyun Liang static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 126059098055SFuyun Liang bool is_first) 1261e2cb1decSSalil Mehta { 1262e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1263e2cb1decSSalil Mehta u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1264d3410018SYufeng Mo struct 
hclge_vf_to_pf_msg send_msg; 1265e2cb1decSSalil Mehta u8 *new_mac_addr = (u8 *)p; 1266e2cb1decSSalil Mehta int status; 1267e2cb1decSSalil Mehta 1268d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 1269ee4bcd3bSJian Shen send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1270d3410018SYufeng Mo ether_addr_copy(send_msg.data, new_mac_addr); 1271ee4bcd3bSJian Shen if (is_first && !hdev->has_pf_mac) 1272ee4bcd3bSJian Shen eth_zero_addr(&send_msg.data[ETH_ALEN]); 1273ee4bcd3bSJian Shen else 1274d3410018SYufeng Mo ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 1275d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1276e2cb1decSSalil Mehta if (!status) 1277e2cb1decSSalil Mehta ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1278e2cb1decSSalil Mehta 1279e2cb1decSSalil Mehta return status; 1280e2cb1decSSalil Mehta } 1281e2cb1decSSalil Mehta 1282ee4bcd3bSJian Shen static struct hclgevf_mac_addr_node * 1283ee4bcd3bSJian Shen hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 1284ee4bcd3bSJian Shen { 1285ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp; 1286ee4bcd3bSJian Shen 1287ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) 1288ee4bcd3bSJian Shen if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 1289ee4bcd3bSJian Shen return mac_node; 1290ee4bcd3bSJian Shen 1291ee4bcd3bSJian Shen return NULL; 1292ee4bcd3bSJian Shen } 1293ee4bcd3bSJian Shen 1294ee4bcd3bSJian Shen static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 1295ee4bcd3bSJian Shen enum HCLGEVF_MAC_NODE_STATE state) 1296ee4bcd3bSJian Shen { 1297ee4bcd3bSJian Shen switch (state) { 1298ee4bcd3bSJian Shen /* from set_rx_mode or tmp_add_list */ 1299ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_ADD: 1300ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_DEL) 1301ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_ACTIVE; 1302ee4bcd3bSJian Shen break; 1303ee4bcd3bSJian Shen /* 
only from set_rx_mode */ 1304ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_DEL: 1305ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1306ee4bcd3bSJian Shen list_del(&mac_node->node); 1307ee4bcd3bSJian Shen kfree(mac_node); 1308ee4bcd3bSJian Shen } else { 1309ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_TO_DEL; 1310ee4bcd3bSJian Shen } 1311ee4bcd3bSJian Shen break; 1312ee4bcd3bSJian Shen /* only from tmp_add_list, the mac_node->state won't be 1313ee4bcd3bSJian Shen * HCLGEVF_MAC_ACTIVE 1314ee4bcd3bSJian Shen */ 1315ee4bcd3bSJian Shen case HCLGEVF_MAC_ACTIVE: 1316ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1317ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_ACTIVE; 1318ee4bcd3bSJian Shen break; 1319ee4bcd3bSJian Shen } 1320ee4bcd3bSJian Shen } 1321ee4bcd3bSJian Shen 1322ee4bcd3bSJian Shen static int hclgevf_update_mac_list(struct hnae3_handle *handle, 1323ee4bcd3bSJian Shen enum HCLGEVF_MAC_NODE_STATE state, 1324ee4bcd3bSJian Shen enum HCLGEVF_MAC_ADDR_TYPE mac_type, 1325e2cb1decSSalil Mehta const unsigned char *addr) 1326e2cb1decSSalil Mehta { 1327e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1328ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node; 1329ee4bcd3bSJian Shen struct list_head *list; 1330e2cb1decSSalil Mehta 1331ee4bcd3bSJian Shen list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1332ee4bcd3bSJian Shen &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1333ee4bcd3bSJian Shen 1334ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 1335ee4bcd3bSJian Shen 1336ee4bcd3bSJian Shen /* if the mac addr is already in the mac list, no need to add a new 1337ee4bcd3bSJian Shen * one into it, just check the mac addr state, convert it to a new 1338ee4bcd3bSJian Shen * new state, or just remove it, or do nothing. 
1339ee4bcd3bSJian Shen */ 1340ee4bcd3bSJian Shen mac_node = hclgevf_find_mac_node(list, addr); 1341ee4bcd3bSJian Shen if (mac_node) { 1342ee4bcd3bSJian Shen hclgevf_update_mac_node(mac_node, state); 1343ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1344ee4bcd3bSJian Shen return 0; 1345ee4bcd3bSJian Shen } 1346ee4bcd3bSJian Shen /* if this address is never added, unnecessary to delete */ 1347ee4bcd3bSJian Shen if (state == HCLGEVF_MAC_TO_DEL) { 1348ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1349ee4bcd3bSJian Shen return -ENOENT; 1350ee4bcd3bSJian Shen } 1351ee4bcd3bSJian Shen 1352ee4bcd3bSJian Shen mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 1353ee4bcd3bSJian Shen if (!mac_node) { 1354ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1355ee4bcd3bSJian Shen return -ENOMEM; 1356ee4bcd3bSJian Shen } 1357ee4bcd3bSJian Shen 1358ee4bcd3bSJian Shen mac_node->state = state; 1359ee4bcd3bSJian Shen ether_addr_copy(mac_node->mac_addr, addr); 1360ee4bcd3bSJian Shen list_add_tail(&mac_node->node, list); 1361ee4bcd3bSJian Shen 1362ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1363ee4bcd3bSJian Shen return 0; 1364ee4bcd3bSJian Shen } 1365ee4bcd3bSJian Shen 1366ee4bcd3bSJian Shen static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1367ee4bcd3bSJian Shen const unsigned char *addr) 1368ee4bcd3bSJian Shen { 1369ee4bcd3bSJian Shen return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1370ee4bcd3bSJian Shen HCLGEVF_MAC_ADDR_UC, addr); 1371e2cb1decSSalil Mehta } 1372e2cb1decSSalil Mehta 1373e2cb1decSSalil Mehta static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1374e2cb1decSSalil Mehta const unsigned char *addr) 1375e2cb1decSSalil Mehta { 1376ee4bcd3bSJian Shen return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1377ee4bcd3bSJian Shen HCLGEVF_MAC_ADDR_UC, addr); 1378e2cb1decSSalil Mehta } 1379e2cb1decSSalil Mehta 1380e2cb1decSSalil Mehta static int 
hclgevf_add_mc_addr(struct hnae3_handle *handle,
		    const unsigned char *addr)
{
	/* queue addition of a multicast MAC address */
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

/* queue removal of a multicast MAC address */
static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

/* hclgevf_add_del_mac_addr - send one MAC add/remove request to the PF
 * @hdev: the VF device
 * @mac_node: list node carrying the address and its TO_ADD/TO_DEL state
 * @mac_type: unicast or multicast, selects the mailbox code/subcode
 *
 * Builds the mailbox message and sends it without waiting for a response
 * payload. Returns the result of hclgevf_send_mbx_msg().
 */
static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

/* hclgevf_config_mac_list - push every queued request in @list to the PF
 * @hdev: the VF device
 * @list: tmp_add_list or tmp_del_list built by hclgevf_sync_mac_list()
 * @mac_type: unicast or multicast
 *
 * Successfully added nodes become ACTIVE; successfully deleted nodes are
 * freed. Processing stops at the first mailbox failure, leaving the
 * remaining nodes on @list so the caller can retry them later.
 */
static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %pM, state = %d, ret = %d\n",
				mac_node->mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

/* hclgevf_sync_from_add_list - merge the processed tmp_add_list back into
 * the main MAC list; must be called with mac_list_lock held
 */
static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means have received a TO_DEL request
		 * during the time window of sending mac config request to PF
		 * If mac_node state is ACTIVE, then change its state to TO_DEL,
		 * then it will be removed at next time. If is TO_ADD, it means
		 * send TO_ADD request failed, so just remove the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

/* hclgevf_sync_from_del_list - merge the (failed) tmp_del_list back into
 * the main MAC list; must be called with mac_list_lock held
 */
static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr is exist in the mac list, it means
			 * received a new request TO_ADD during the time window
			 * of sending mac addr config request to PF, so just
			 * change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			/* delete failed: keep the node on the main list so
			 * the removal is retried next sync
			 */
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}

/* free every node on @list; caller must hold mac_list_lock */
static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

/* hclgevf_sync_mac_list - flush queued MAC add/delete requests to the PF
 * @hdev: the VF device
 * @mac_type: unicast or multicast, selects which main list to sync
 *
 * Snapshots the pending TO_ADD/TO_DEL entries under mac_list_lock into
 * temporary lists, performs the mailbox configuration outside the lock,
 * then merges the results back under the lock.
 */
static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addr to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addr outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			/* TO_ADD nodes are copied (not moved) so the main
			 * list keeps tracking the address while the request
			 * is in flight; GFP_ATOMIC because the lock is held
			 */
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if some mac addresses were added/deleted fail, move back to the
	 * mac_list, and retry at next time.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

/* flush pending MAC requests for both the unicast and multicast tables */
static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

/* free all tracked MAC addresses (driver teardown path) */
static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

/* hclgevf_set_vlan_filter - add or remove a VLAN filter via the PF
 * @handle: hnae3 handle of this VF
 * @proto: VLAN protocol, only 802.1Q is supported
 * @vlan_id: VLAN to add/remove
 * @is_kill: true to remove the filter, false to add it
 *
 * Returns 0 on success, -EINVAL/-EPROTONOSUPPORT for bad arguments,
 * -EBUSY when a removal is deferred because a reset is in progress,
 * or the mailbox error code.
 */
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
/* layout of the VLAN filter mailbox payload */
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When device is resetting, firmware is unable to handle
	 * mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when remove hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to be consistent
	 * with stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

/* hclgevf_sync_vlan_filter - retry VLAN removals that previously failed
 * @hdev: the VF device
 *
 * Walks vlan_del_fail_bmap and re-issues the removals, clearing each bit
 * on success. Bails out on the first failure, and caps the work at
 * HCLGEVF_MAX_SYNC_COUNT removals per invocation.
 */
static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

/* enable/disable hardware VLAN tag stripping on receive, via the PF */
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

/* hclgevf_reset_tqp - reset one task queue pair
 * @handle: hnae3 handle of this VF
 * @queue_id: queue to reset
 *
 * The queue is disabled locally before the reset request is sent to the
 * PF, and the mailbox call waits for the PF's response.
 */
static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

/* request the PF to set a new MTU for this VF */
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

/* hclgevf_notify_client - forward a reset notification to the nic client
 * @hdev: the VF device
 * @type: which reset stage (init/uninit/up/down) to notify
 *
 * No-op (returns 0) when no client is registered; -EOPNOTSUPP when the
 * client provides no reset_notify callback.
 */
static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if
(!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 169125d1817cSHuazhong Tan !client) 169225d1817cSHuazhong Tan return 0; 169325d1817cSHuazhong Tan 16946988eb2aSSalil Mehta if (!client->ops->reset_notify) 16956988eb2aSSalil Mehta return -EOPNOTSUPP; 16966988eb2aSSalil Mehta 16976a5f6fa3SHuazhong Tan ret = client->ops->reset_notify(handle, type); 16986a5f6fa3SHuazhong Tan if (ret) 16996a5f6fa3SHuazhong Tan dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 17006a5f6fa3SHuazhong Tan type, ret); 17016a5f6fa3SHuazhong Tan 17026a5f6fa3SHuazhong Tan return ret; 17036988eb2aSSalil Mehta } 17046988eb2aSSalil Mehta 17056988eb2aSSalil Mehta static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 17066988eb2aSSalil Mehta { 1707aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_US 20000 1708aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_CNT 2000 1709aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1710aa5c4f17SHuazhong Tan (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1711aa5c4f17SHuazhong Tan 1712aa5c4f17SHuazhong Tan u32 val; 1713aa5c4f17SHuazhong Tan int ret; 17146988eb2aSSalil Mehta 1715f28368bbSHuazhong Tan if (hdev->reset_type == HNAE3_VF_RESET) 171672e2fb07SHuazhong Tan ret = readl_poll_timeout(hdev->hw.io_base + 171772e2fb07SHuazhong Tan HCLGEVF_VF_RST_ING, val, 171872e2fb07SHuazhong Tan !(val & HCLGEVF_VF_RST_ING_BIT), 171972e2fb07SHuazhong Tan HCLGEVF_RESET_WAIT_US, 172072e2fb07SHuazhong Tan HCLGEVF_RESET_WAIT_TIMEOUT_US); 172172e2fb07SHuazhong Tan else 172272e2fb07SHuazhong Tan ret = readl_poll_timeout(hdev->hw.io_base + 172372e2fb07SHuazhong Tan HCLGEVF_RST_ING, val, 1724aa5c4f17SHuazhong Tan !(val & HCLGEVF_RST_ING_BITS), 1725aa5c4f17SHuazhong Tan HCLGEVF_RESET_WAIT_US, 1726aa5c4f17SHuazhong Tan HCLGEVF_RESET_WAIT_TIMEOUT_US); 17276988eb2aSSalil Mehta 17286988eb2aSSalil Mehta /* hardware completion status should be available by this time */ 1729aa5c4f17SHuazhong Tan if (ret) { 1730aa5c4f17SHuazhong Tan dev_err(&hdev->pdev->dev, 
17316988eb2aSSalil Mehta "could'nt get reset done status from h/w, timeout!\n"); 1732aa5c4f17SHuazhong Tan return ret; 17336988eb2aSSalil Mehta } 17346988eb2aSSalil Mehta 17356988eb2aSSalil Mehta /* we will wait a bit more to let reset of the stack to complete. This 17366988eb2aSSalil Mehta * might happen in case reset assertion was made by PF. Yes, this also 17376988eb2aSSalil Mehta * means we might end up waiting bit more even for VF reset. 17386988eb2aSSalil Mehta */ 17396988eb2aSSalil Mehta msleep(5000); 17406988eb2aSSalil Mehta 17416988eb2aSSalil Mehta return 0; 17426988eb2aSSalil Mehta } 17436988eb2aSSalil Mehta 17446b428b4fSHuazhong Tan static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 17456b428b4fSHuazhong Tan { 17466b428b4fSHuazhong Tan u32 reg_val; 17476b428b4fSHuazhong Tan 17486b428b4fSHuazhong Tan reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); 17496b428b4fSHuazhong Tan if (enable) 17506b428b4fSHuazhong Tan reg_val |= HCLGEVF_NIC_SW_RST_RDY; 17516b428b4fSHuazhong Tan else 17526b428b4fSHuazhong Tan reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 17536b428b4fSHuazhong Tan 17546b428b4fSHuazhong Tan hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 17556b428b4fSHuazhong Tan reg_val); 17566b428b4fSHuazhong Tan } 17576b428b4fSHuazhong Tan 17586988eb2aSSalil Mehta static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 17596988eb2aSSalil Mehta { 17607a01c897SSalil Mehta int ret; 17617a01c897SSalil Mehta 17626988eb2aSSalil Mehta /* uninitialize the nic client */ 17636a5f6fa3SHuazhong Tan ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 17646a5f6fa3SHuazhong Tan if (ret) 17656a5f6fa3SHuazhong Tan return ret; 17666988eb2aSSalil Mehta 17677a01c897SSalil Mehta /* re-initialize the hclge device */ 17689c6f7085SHuazhong Tan ret = hclgevf_reset_hdev(hdev); 17697a01c897SSalil Mehta if (ret) { 17707a01c897SSalil Mehta dev_err(&hdev->pdev->dev, 17717a01c897SSalil Mehta "hclge device re-init failed, VF is disabled!\n"); 
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

/* hclgevf_reset_prepare_wait - tell PF/firmware the VF is ready for reset
 * @hdev: the VF device
 *
 * For a VF function reset, first asks the PF (via mailbox) to assert the
 * reset. Then disables the command queue, waits HCLGEVF_RESET_SYNC_TIME
 * ms and raises the handshake bit so hardware knows preparatory work is
 * done. Returns the mailbox result (0 when no mailbox was needed).
 */
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	struct hclge_vf_to_pf_msg send_msg;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

/* dump reset statistics and related hardware status for debugging */
static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

/* hclgevf_reset_err_handle - handle a failed reset attempt: reschedule
 * the reset up to HCLGEVF_RESET_MAX_FAIL_CNT times, else mark the device
 * as reset-failed and dump diagnostics
 */
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset
 fail */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

/* hclgevf_reset_prepare - first stage of a reset: bring the nic down
 * (under rtnl) and signal reset readiness to PF/firmware
 */
static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.rst_cnt++;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

/* hclgevf_reset_rebuild - final stage of a reset: rebuild the stack
 * (under rtnl) and, on success, clear the failure bookkeeping
 */
static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

/* hclgevf_reset - run the full reset sequence: prepare, wait for the
 * hardware to finish, then rebuild; any failure goes to the error handler
 */
static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}

/* hclgevf_get_reset_level - pick the highest-priority pending reset from
 * @addr and clear it (plus any lower levels it supersedes) from the mask
 */
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1924720bd583SHuazhong Tan 1925dea846e8SHuazhong Tan /* return the highest priority reset level amongst all */ 1926b90fcc5bSHuazhong Tan if (test_bit(HNAE3_VF_RESET, addr)) { 1927b90fcc5bSHuazhong Tan rst_level = HNAE3_VF_RESET; 1928b90fcc5bSHuazhong Tan clear_bit(HNAE3_VF_RESET, addr); 1929b90fcc5bSHuazhong Tan clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1930b90fcc5bSHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 1931b90fcc5bSHuazhong Tan } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1932dea846e8SHuazhong Tan rst_level = HNAE3_VF_FULL_RESET; 1933dea846e8SHuazhong Tan clear_bit(HNAE3_VF_FULL_RESET, addr); 1934dea846e8SHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 1935aa5c4f17SHuazhong Tan } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1936aa5c4f17SHuazhong Tan rst_level = HNAE3_VF_PF_FUNC_RESET; 1937aa5c4f17SHuazhong Tan clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1938aa5c4f17SHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 1939dea846e8SHuazhong Tan } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1940dea846e8SHuazhong Tan rst_level = HNAE3_VF_FUNC_RESET; 1941dea846e8SHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 19426ff3cf07SHuazhong Tan } else if (test_bit(HNAE3_FLR_RESET, addr)) { 19436ff3cf07SHuazhong Tan rst_level = HNAE3_FLR_RESET; 19446ff3cf07SHuazhong Tan clear_bit(HNAE3_FLR_RESET, addr); 1945720bd583SHuazhong Tan } 1946720bd583SHuazhong Tan 1947720bd583SHuazhong Tan return rst_level; 1948720bd583SHuazhong Tan } 1949720bd583SHuazhong Tan 19506ae4e733SShiju Jose static void hclgevf_reset_event(struct pci_dev *pdev, 19516ae4e733SShiju Jose struct hnae3_handle *handle) 19526d4c3981SSalil Mehta { 19536ff3cf07SHuazhong Tan struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 19546ff3cf07SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 19556d4c3981SSalil Mehta 19566d4c3981SSalil Mehta dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 19576d4c3981SSalil Mehta 
19586ff3cf07SHuazhong Tan if (hdev->default_reset_request) 19590742ed7cSHuazhong Tan hdev->reset_level = 1960720bd583SHuazhong Tan hclgevf_get_reset_level(hdev, 1961720bd583SHuazhong Tan &hdev->default_reset_request); 1962720bd583SHuazhong Tan else 1963dea846e8SHuazhong Tan hdev->reset_level = HNAE3_VF_FUNC_RESET; 19646d4c3981SSalil Mehta 1965436667d2SSalil Mehta /* reset of this VF requested */ 1966436667d2SSalil Mehta set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1967436667d2SSalil Mehta hclgevf_reset_task_schedule(hdev); 19686d4c3981SSalil Mehta 19690742ed7cSHuazhong Tan hdev->last_reset_time = jiffies; 19706d4c3981SSalil Mehta } 19716d4c3981SSalil Mehta 1972720bd583SHuazhong Tan static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1973720bd583SHuazhong Tan enum hnae3_reset_type rst_type) 1974720bd583SHuazhong Tan { 1975720bd583SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 1976720bd583SHuazhong Tan 1977720bd583SHuazhong Tan set_bit(rst_type, &hdev->default_reset_request); 1978720bd583SHuazhong Tan } 1979720bd583SHuazhong Tan 1980f28368bbSHuazhong Tan static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1981f28368bbSHuazhong Tan { 1982f28368bbSHuazhong Tan writel(en ? 
1 : 0, vector->addr); 1983f28368bbSHuazhong Tan } 1984f28368bbSHuazhong Tan 19856ff3cf07SHuazhong Tan static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 19866ff3cf07SHuazhong Tan { 1987f28368bbSHuazhong Tan #define HCLGEVF_FLR_RETRY_WAIT_MS 500 1988f28368bbSHuazhong Tan #define HCLGEVF_FLR_RETRY_CNT 5 1989f28368bbSHuazhong Tan 19906ff3cf07SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 1991f28368bbSHuazhong Tan int retry_cnt = 0; 1992f28368bbSHuazhong Tan int ret; 19936ff3cf07SHuazhong Tan 1994f28368bbSHuazhong Tan retry: 1995f28368bbSHuazhong Tan down(&hdev->reset_sem); 1996f28368bbSHuazhong Tan set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1997f28368bbSHuazhong Tan hdev->reset_type = HNAE3_FLR_RESET; 1998f28368bbSHuazhong Tan ret = hclgevf_reset_prepare(hdev); 1999f28368bbSHuazhong Tan if (ret) { 2000f28368bbSHuazhong Tan dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", 2001f28368bbSHuazhong Tan ret); 2002f28368bbSHuazhong Tan if (hdev->reset_pending || 2003f28368bbSHuazhong Tan retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) { 20046ff3cf07SHuazhong Tan dev_err(&hdev->pdev->dev, 2005f28368bbSHuazhong Tan "reset_pending:0x%lx, retry_cnt:%d\n", 2006f28368bbSHuazhong Tan hdev->reset_pending, retry_cnt); 2007f28368bbSHuazhong Tan clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2008f28368bbSHuazhong Tan up(&hdev->reset_sem); 2009f28368bbSHuazhong Tan msleep(HCLGEVF_FLR_RETRY_WAIT_MS); 2010f28368bbSHuazhong Tan goto retry; 2011f28368bbSHuazhong Tan } 2012f28368bbSHuazhong Tan } 2013f28368bbSHuazhong Tan 2014f28368bbSHuazhong Tan /* disable misc vector before FLR done */ 2015f28368bbSHuazhong Tan hclgevf_enable_vector(&hdev->misc_vector, false); 2016f28368bbSHuazhong Tan hdev->rst_stats.flr_rst_cnt++; 2017f28368bbSHuazhong Tan } 2018f28368bbSHuazhong Tan 2019f28368bbSHuazhong Tan static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 2020f28368bbSHuazhong Tan { 2021f28368bbSHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 
2022f28368bbSHuazhong Tan int ret; 2023f28368bbSHuazhong Tan 2024f28368bbSHuazhong Tan hclgevf_enable_vector(&hdev->misc_vector, true); 2025f28368bbSHuazhong Tan 2026f28368bbSHuazhong Tan ret = hclgevf_reset_rebuild(hdev); 2027f28368bbSHuazhong Tan if (ret) 2028f28368bbSHuazhong Tan dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 2029f28368bbSHuazhong Tan ret); 2030f28368bbSHuazhong Tan 2031f28368bbSHuazhong Tan hdev->reset_type = HNAE3_NONE_RESET; 2032f28368bbSHuazhong Tan clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2033f28368bbSHuazhong Tan up(&hdev->reset_sem); 20346ff3cf07SHuazhong Tan } 20356ff3cf07SHuazhong Tan 2036e2cb1decSSalil Mehta static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 2037e2cb1decSSalil Mehta { 2038e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2039e2cb1decSSalil Mehta 2040e2cb1decSSalil Mehta return hdev->fw_version; 2041e2cb1decSSalil Mehta } 2042e2cb1decSSalil Mehta 2043e2cb1decSSalil Mehta static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 2044e2cb1decSSalil Mehta { 2045e2cb1decSSalil Mehta struct hclgevf_misc_vector *vector = &hdev->misc_vector; 2046e2cb1decSSalil Mehta 2047e2cb1decSSalil Mehta vector->vector_irq = pci_irq_vector(hdev->pdev, 2048e2cb1decSSalil Mehta HCLGEVF_MISC_VECTOR_NUM); 2049e2cb1decSSalil Mehta vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 2050e2cb1decSSalil Mehta /* vector status always valid for Vector 0 */ 2051e2cb1decSSalil Mehta hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 2052e2cb1decSSalil Mehta hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 2053e2cb1decSSalil Mehta 2054e2cb1decSSalil Mehta hdev->num_msi_left -= 1; 2055e2cb1decSSalil Mehta hdev->num_msi_used += 1; 2056e2cb1decSSalil Mehta } 2057e2cb1decSSalil Mehta 205835a1e503SSalil Mehta void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 205935a1e503SSalil Mehta { 2060ff200099SYunsheng Lin if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) 
&& 2061ff200099SYunsheng Lin !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 2062ff200099SYunsheng Lin &hdev->state)) 20630ea68902SYunsheng Lin mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 206435a1e503SSalil Mehta } 206535a1e503SSalil Mehta 206607a0556aSSalil Mehta void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 2067e2cb1decSSalil Mehta { 2068ff200099SYunsheng Lin if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2069ff200099SYunsheng Lin !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 2070ff200099SYunsheng Lin &hdev->state)) 20710ea68902SYunsheng Lin mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 207207a0556aSSalil Mehta } 2073e2cb1decSSalil Mehta 2074ff200099SYunsheng Lin static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 2075ff200099SYunsheng Lin unsigned long delay) 2076e2cb1decSSalil Mehta { 2077d5432455SGuojia Liao if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2078d5432455SGuojia Liao !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 20790ea68902SYunsheng Lin mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); 2080e2cb1decSSalil Mehta } 2081e2cb1decSSalil Mehta 2082ff200099SYunsheng Lin static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) 208335a1e503SSalil Mehta { 2084d6ad7c53SGuojia Liao #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 2085d6ad7c53SGuojia Liao 2086ff200099SYunsheng Lin if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) 2087ff200099SYunsheng Lin return; 2088ff200099SYunsheng Lin 2089f28368bbSHuazhong Tan down(&hdev->reset_sem); 2090f28368bbSHuazhong Tan set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 209135a1e503SSalil Mehta 2092436667d2SSalil Mehta if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 2093436667d2SSalil Mehta &hdev->reset_state)) { 2094436667d2SSalil Mehta /* PF has initmated that it is about to reset the hardware. 
20959b2f3477SWeihang Li * We now have to poll & check if hardware has actually 20969b2f3477SWeihang Li * completed the reset sequence. On hardware reset completion, 20979b2f3477SWeihang Li * VF needs to reset the client and ae device. 209835a1e503SSalil Mehta */ 2099436667d2SSalil Mehta hdev->reset_attempts = 0; 2100436667d2SSalil Mehta 2101dea846e8SHuazhong Tan hdev->last_reset_time = jiffies; 2102dea846e8SHuazhong Tan while ((hdev->reset_type = 2103dea846e8SHuazhong Tan hclgevf_get_reset_level(hdev, &hdev->reset_pending)) 21041cc9bc6eSHuazhong Tan != HNAE3_NONE_RESET) 21051cc9bc6eSHuazhong Tan hclgevf_reset(hdev); 2106436667d2SSalil Mehta } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 2107436667d2SSalil Mehta &hdev->reset_state)) { 2108436667d2SSalil Mehta /* we could be here when either of below happens: 21099b2f3477SWeihang Li * 1. reset was initiated due to watchdog timeout caused by 2110436667d2SSalil Mehta * a. IMP was earlier reset and our TX got choked down and 2111436667d2SSalil Mehta * which resulted in watchdog reacting and inducing VF 2112436667d2SSalil Mehta * reset. This also means our cmdq would be unreliable. 2113436667d2SSalil Mehta * b. problem in TX due to other lower layer(example link 2114436667d2SSalil Mehta * layer not functioning properly etc.) 2115436667d2SSalil Mehta * 2. VF reset might have been initiated due to some config 2116436667d2SSalil Mehta * change. 2117436667d2SSalil Mehta * 2118436667d2SSalil Mehta * NOTE: Theres no clear way to detect above cases than to react 2119436667d2SSalil Mehta * to the response of PF for this reset request. PF will ack the 2120436667d2SSalil Mehta * 1b and 2. cases but we will not get any intimation about 1a 2121436667d2SSalil Mehta * from PF as cmdq would be in unreliable state i.e. mailbox 2122436667d2SSalil Mehta * communication between PF and VF would be broken. 
212346ee7350SGuojia Liao * 212446ee7350SGuojia Liao * if we are never geting into pending state it means either: 2125436667d2SSalil Mehta * 1. PF is not receiving our request which could be due to IMP 2126436667d2SSalil Mehta * reset 2127436667d2SSalil Mehta * 2. PF is screwed 2128436667d2SSalil Mehta * We cannot do much for 2. but to check first we can try reset 2129436667d2SSalil Mehta * our PCIe + stack and see if it alleviates the problem. 2130436667d2SSalil Mehta */ 2131d6ad7c53SGuojia Liao if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 2132436667d2SSalil Mehta /* prepare for full reset of stack + pcie interface */ 2133dea846e8SHuazhong Tan set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 2134436667d2SSalil Mehta 2135436667d2SSalil Mehta /* "defer" schedule the reset task again */ 2136436667d2SSalil Mehta set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2137436667d2SSalil Mehta } else { 2138436667d2SSalil Mehta hdev->reset_attempts++; 2139436667d2SSalil Mehta 2140dea846e8SHuazhong Tan set_bit(hdev->reset_level, &hdev->reset_pending); 2141dea846e8SHuazhong Tan set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2142436667d2SSalil Mehta } 2143dea846e8SHuazhong Tan hclgevf_reset_task_schedule(hdev); 2144436667d2SSalil Mehta } 214535a1e503SSalil Mehta 2146afb6afdbSHuazhong Tan hdev->reset_type = HNAE3_NONE_RESET; 214735a1e503SSalil Mehta clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2148f28368bbSHuazhong Tan up(&hdev->reset_sem); 214935a1e503SSalil Mehta } 215035a1e503SSalil Mehta 2151ff200099SYunsheng Lin static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 2152e2cb1decSSalil Mehta { 2153ff200099SYunsheng Lin if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2154ff200099SYunsheng Lin return; 2155e2cb1decSSalil Mehta 2156e2cb1decSSalil Mehta if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 2157e2cb1decSSalil Mehta return; 2158e2cb1decSSalil Mehta 215907a0556aSSalil Mehta 
hclgevf_mbx_async_handler(hdev); 2160e2cb1decSSalil Mehta 2161e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2162e2cb1decSSalil Mehta } 2163e2cb1decSSalil Mehta 2164ff200099SYunsheng Lin static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 2165a6d818e3SYunsheng Lin { 2166d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 2167a6d818e3SYunsheng Lin int ret; 2168a6d818e3SYunsheng Lin 21691416d333SHuazhong Tan if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) 2170c59a85c0SJian Shen return; 2171c59a85c0SJian Shen 2172d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 2173d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2174a6d818e3SYunsheng Lin if (ret) 2175a6d818e3SYunsheng Lin dev_err(&hdev->pdev->dev, 2176a6d818e3SYunsheng Lin "VF sends keep alive cmd failed(=%d)\n", ret); 2177a6d818e3SYunsheng Lin } 2178a6d818e3SYunsheng Lin 2179ff200099SYunsheng Lin static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 2180e2cb1decSSalil Mehta { 2181ff200099SYunsheng Lin unsigned long delta = round_jiffies_relative(HZ); 2182ff200099SYunsheng Lin struct hnae3_handle *handle = &hdev->nic; 2183e2cb1decSSalil Mehta 2184ff200099SYunsheng Lin if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 2185ff200099SYunsheng Lin delta = jiffies - hdev->last_serv_processed; 2186db01afebSliuzhongzhu 2187ff200099SYunsheng Lin if (delta < round_jiffies_relative(HZ)) { 2188ff200099SYunsheng Lin delta = round_jiffies_relative(HZ) - delta; 2189ff200099SYunsheng Lin goto out; 2190db01afebSliuzhongzhu } 2191ff200099SYunsheng Lin } 2192ff200099SYunsheng Lin 2193ff200099SYunsheng Lin hdev->serv_processed_cnt++; 2194ff200099SYunsheng Lin if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 2195ff200099SYunsheng Lin hclgevf_keep_alive(hdev); 2196ff200099SYunsheng Lin 2197ff200099SYunsheng Lin if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 2198ff200099SYunsheng Lin 
hdev->last_serv_processed = jiffies; 2199ff200099SYunsheng Lin goto out; 2200ff200099SYunsheng Lin } 2201ff200099SYunsheng Lin 2202ff200099SYunsheng Lin if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 2203ff200099SYunsheng Lin hclgevf_tqps_update_stats(handle); 2204e2cb1decSSalil Mehta 2205e2cb1decSSalil Mehta /* request the link status from the PF. PF would be able to tell VF 2206e2cb1decSSalil Mehta * about such updates in future so we might remove this later 2207e2cb1decSSalil Mehta */ 2208e2cb1decSSalil Mehta hclgevf_request_link_info(hdev); 2209e2cb1decSSalil Mehta 22109194d18bSliuzhongzhu hclgevf_update_link_mode(hdev); 22119194d18bSliuzhongzhu 2212fe4144d4SJian Shen hclgevf_sync_vlan_filter(hdev); 2213fe4144d4SJian Shen 2214ee4bcd3bSJian Shen hclgevf_sync_mac_table(hdev); 2215ee4bcd3bSJian Shen 2216c631c696SJian Shen hclgevf_sync_promisc_mode(hdev); 2217c631c696SJian Shen 2218ff200099SYunsheng Lin hdev->last_serv_processed = jiffies; 2219436667d2SSalil Mehta 2220ff200099SYunsheng Lin out: 2221ff200099SYunsheng Lin hclgevf_task_schedule(hdev, delta); 2222ff200099SYunsheng Lin } 2223b3c3fe8eSYunsheng Lin 2224ff200099SYunsheng Lin static void hclgevf_service_task(struct work_struct *work) 2225ff200099SYunsheng Lin { 2226ff200099SYunsheng Lin struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 2227ff200099SYunsheng Lin service_task.work); 2228ff200099SYunsheng Lin 2229ff200099SYunsheng Lin hclgevf_reset_service_task(hdev); 2230ff200099SYunsheng Lin hclgevf_mailbox_service_task(hdev); 2231ff200099SYunsheng Lin hclgevf_periodic_service_task(hdev); 2232ff200099SYunsheng Lin 2233ff200099SYunsheng Lin /* Handle reset and mbx again in case periodical task delays the 2234ff200099SYunsheng Lin * handling by calling hclgevf_task_schedule() in 2235ff200099SYunsheng Lin * hclgevf_periodic_service_task() 2236ff200099SYunsheng Lin */ 2237ff200099SYunsheng Lin hclgevf_reset_service_task(hdev); 2238ff200099SYunsheng Lin 
hclgevf_mailbox_service_task(hdev); 2239e2cb1decSSalil Mehta } 2240e2cb1decSSalil Mehta 2241e2cb1decSSalil Mehta static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 2242e2cb1decSSalil Mehta { 2243e2cb1decSSalil Mehta hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 2244e2cb1decSSalil Mehta } 2245e2cb1decSSalil Mehta 2246b90fcc5bSHuazhong Tan static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 2247b90fcc5bSHuazhong Tan u32 *clearval) 2248e2cb1decSSalil Mehta { 224913050921SHuazhong Tan u32 val, cmdq_stat_reg, rst_ing_reg; 2250e2cb1decSSalil Mehta 2251e2cb1decSSalil Mehta /* fetch the events from their corresponding regs */ 225213050921SHuazhong Tan cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 225313050921SHuazhong Tan HCLGEVF_VECTOR0_CMDQ_STAT_REG); 2254e2cb1decSSalil Mehta 225513050921SHuazhong Tan if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 2256b90fcc5bSHuazhong Tan rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2257b90fcc5bSHuazhong Tan dev_info(&hdev->pdev->dev, 2258b90fcc5bSHuazhong Tan "receive reset interrupt 0x%x!\n", rst_ing_reg); 2259b90fcc5bSHuazhong Tan set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 2260b90fcc5bSHuazhong Tan set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2261ef5f8e50SHuazhong Tan set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 226213050921SHuazhong Tan *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 2263c88a6e7dSHuazhong Tan hdev->rst_stats.vf_rst_cnt++; 226472e2fb07SHuazhong Tan /* set up VF hardware reset status, its PF will clear 226572e2fb07SHuazhong Tan * this status when PF has initialized done. 
226672e2fb07SHuazhong Tan */ 226772e2fb07SHuazhong Tan val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 226872e2fb07SHuazhong Tan hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 226972e2fb07SHuazhong Tan val | HCLGEVF_VF_RST_ING_BIT); 2270b90fcc5bSHuazhong Tan return HCLGEVF_VECTOR0_EVENT_RST; 2271b90fcc5bSHuazhong Tan } 2272b90fcc5bSHuazhong Tan 2273e2cb1decSSalil Mehta /* check for vector0 mailbox(=CMDQ RX) event source */ 227413050921SHuazhong Tan if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 227513050921SHuazhong Tan /* for revision 0x21, clearing interrupt is writing bit 0 227613050921SHuazhong Tan * to the clear register, writing bit 1 means to keep the 227713050921SHuazhong Tan * old value. 227813050921SHuazhong Tan * for revision 0x20, the clear register is a read & write 227913050921SHuazhong Tan * register, so we should just write 0 to the bit we are 228013050921SHuazhong Tan * handling, and keep other bits as cmdq_stat_reg. 228113050921SHuazhong Tan */ 228213050921SHuazhong Tan if (hdev->pdev->revision >= 0x21) 228313050921SHuazhong Tan *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 228413050921SHuazhong Tan else 228513050921SHuazhong Tan *clearval = cmdq_stat_reg & 228613050921SHuazhong Tan ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 228713050921SHuazhong Tan 2288b90fcc5bSHuazhong Tan return HCLGEVF_VECTOR0_EVENT_MBX; 2289e2cb1decSSalil Mehta } 2290e2cb1decSSalil Mehta 2291e45afb39SHuazhong Tan /* print other vector0 event source */ 2292e45afb39SHuazhong Tan dev_info(&hdev->pdev->dev, 2293e45afb39SHuazhong Tan "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2294e45afb39SHuazhong Tan cmdq_stat_reg); 2295e2cb1decSSalil Mehta 2296b90fcc5bSHuazhong Tan return HCLGEVF_VECTOR0_EVENT_OTHER; 2297e2cb1decSSalil Mehta } 2298e2cb1decSSalil Mehta 2299e2cb1decSSalil Mehta static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2300e2cb1decSSalil Mehta { 2301b90fcc5bSHuazhong Tan enum hclgevf_evt_cause event_cause; 
2302e2cb1decSSalil Mehta struct hclgevf_dev *hdev = data; 2303e2cb1decSSalil Mehta u32 clearval; 2304e2cb1decSSalil Mehta 2305e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, false); 2306b90fcc5bSHuazhong Tan event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2307e2cb1decSSalil Mehta 2308b90fcc5bSHuazhong Tan switch (event_cause) { 2309b90fcc5bSHuazhong Tan case HCLGEVF_VECTOR0_EVENT_RST: 2310b90fcc5bSHuazhong Tan hclgevf_reset_task_schedule(hdev); 2311b90fcc5bSHuazhong Tan break; 2312b90fcc5bSHuazhong Tan case HCLGEVF_VECTOR0_EVENT_MBX: 231307a0556aSSalil Mehta hclgevf_mbx_handler(hdev); 2314b90fcc5bSHuazhong Tan break; 2315b90fcc5bSHuazhong Tan default: 2316b90fcc5bSHuazhong Tan break; 2317b90fcc5bSHuazhong Tan } 2318e2cb1decSSalil Mehta 2319b90fcc5bSHuazhong Tan if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 2320e2cb1decSSalil Mehta hclgevf_clear_event_cause(hdev, clearval); 2321e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, true); 2322b90fcc5bSHuazhong Tan } 2323e2cb1decSSalil Mehta 2324e2cb1decSSalil Mehta return IRQ_HANDLED; 2325e2cb1decSSalil Mehta } 2326e2cb1decSSalil Mehta 2327e2cb1decSSalil Mehta static int hclgevf_configure(struct hclgevf_dev *hdev) 2328e2cb1decSSalil Mehta { 2329e2cb1decSSalil Mehta int ret; 2330e2cb1decSSalil Mehta 233192f11ea1SJian Shen /* get current port based vlan state from PF */ 233292f11ea1SJian Shen ret = hclgevf_get_port_base_vlan_filter_state(hdev); 233392f11ea1SJian Shen if (ret) 233492f11ea1SJian Shen return ret; 233592f11ea1SJian Shen 2336e2cb1decSSalil Mehta /* get queue configuration from PF */ 23376cee6fc3SJian Shen ret = hclgevf_get_queue_info(hdev); 2338e2cb1decSSalil Mehta if (ret) 2339e2cb1decSSalil Mehta return ret; 2340c0425944SPeng Li 2341c0425944SPeng Li /* get queue depth info from PF */ 2342c0425944SPeng Li ret = hclgevf_get_queue_depth(hdev); 2343c0425944SPeng Li if (ret) 2344c0425944SPeng Li return ret; 2345c0425944SPeng Li 23469c3e7130Sliuzhongzhu ret = 
hclgevf_get_pf_media_type(hdev); 23479c3e7130Sliuzhongzhu if (ret) 23489c3e7130Sliuzhongzhu return ret; 23499c3e7130Sliuzhongzhu 2350e2cb1decSSalil Mehta /* get tc configuration from PF */ 2351e2cb1decSSalil Mehta return hclgevf_get_tc_info(hdev); 2352e2cb1decSSalil Mehta } 2353e2cb1decSSalil Mehta 23547a01c897SSalil Mehta static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 23557a01c897SSalil Mehta { 23567a01c897SSalil Mehta struct pci_dev *pdev = ae_dev->pdev; 23571154bb26SPeng Li struct hclgevf_dev *hdev; 23587a01c897SSalil Mehta 23597a01c897SSalil Mehta hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 23607a01c897SSalil Mehta if (!hdev) 23617a01c897SSalil Mehta return -ENOMEM; 23627a01c897SSalil Mehta 23637a01c897SSalil Mehta hdev->pdev = pdev; 23647a01c897SSalil Mehta hdev->ae_dev = ae_dev; 23657a01c897SSalil Mehta ae_dev->priv = hdev; 23667a01c897SSalil Mehta 23677a01c897SSalil Mehta return 0; 23687a01c897SSalil Mehta } 23697a01c897SSalil Mehta 2370e2cb1decSSalil Mehta static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2371e2cb1decSSalil Mehta { 2372e2cb1decSSalil Mehta struct hnae3_handle *roce = &hdev->roce; 2373e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 2374e2cb1decSSalil Mehta 237507acf909SJian Shen roce->rinfo.num_vectors = hdev->num_roce_msix; 2376e2cb1decSSalil Mehta 2377e2cb1decSSalil Mehta if (hdev->num_msi_left < roce->rinfo.num_vectors || 2378e2cb1decSSalil Mehta hdev->num_msi_left == 0) 2379e2cb1decSSalil Mehta return -EINVAL; 2380e2cb1decSSalil Mehta 238107acf909SJian Shen roce->rinfo.base_vector = hdev->roce_base_vector; 2382e2cb1decSSalil Mehta 2383e2cb1decSSalil Mehta roce->rinfo.netdev = nic->kinfo.netdev; 2384e2cb1decSSalil Mehta roce->rinfo.roce_io_base = hdev->hw.io_base; 2385e2cb1decSSalil Mehta 2386e2cb1decSSalil Mehta roce->pdev = nic->pdev; 2387e2cb1decSSalil Mehta roce->ae_algo = nic->ae_algo; 2388e2cb1decSSalil Mehta roce->numa_node_mask = nic->numa_node_mask; 2389e2cb1decSSalil Mehta 
2390e2cb1decSSalil Mehta return 0; 2391e2cb1decSSalil Mehta } 2392e2cb1decSSalil Mehta 2393b26a6feaSPeng Li static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 2394b26a6feaSPeng Li { 2395b26a6feaSPeng Li struct hclgevf_cfg_gro_status_cmd *req; 2396b26a6feaSPeng Li struct hclgevf_desc desc; 2397b26a6feaSPeng Li int ret; 2398b26a6feaSPeng Li 2399b26a6feaSPeng Li if (!hnae3_dev_gro_supported(hdev)) 2400b26a6feaSPeng Li return 0; 2401b26a6feaSPeng Li 2402b26a6feaSPeng Li hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2403b26a6feaSPeng Li false); 2404b26a6feaSPeng Li req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2405b26a6feaSPeng Li 2406b26a6feaSPeng Li req->gro_en = cpu_to_le16(en ? 1 : 0); 2407b26a6feaSPeng Li 2408b26a6feaSPeng Li ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2409b26a6feaSPeng Li if (ret) 2410b26a6feaSPeng Li dev_err(&hdev->pdev->dev, 2411b26a6feaSPeng Li "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2412b26a6feaSPeng Li 2413b26a6feaSPeng Li return ret; 2414b26a6feaSPeng Li } 2415b26a6feaSPeng Li 2416944de484SGuojia Liao static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) 2417e2cb1decSSalil Mehta { 2418e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2419944de484SGuojia Liao struct hclgevf_rss_tuple_cfg *tuple_sets; 24204093d1a2SGuangbin Huang u32 i; 2421e2cb1decSSalil Mehta 2422944de484SGuojia Liao rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 24234093d1a2SGuangbin Huang rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2424944de484SGuojia Liao tuple_sets = &rss_cfg->rss_tuple_sets; 2425374ad291SJian Shen if (hdev->pdev->revision >= 0x21) { 2426472d7eceSJian Shen rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2427472d7eceSJian Shen memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2428374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 2429374ad291SJian Shen 2430944de484SGuojia Liao tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2431944de484SGuojia Liao 
tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2432944de484SGuojia Liao tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2433944de484SGuojia Liao tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2434944de484SGuojia Liao tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2435944de484SGuojia Liao tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2436944de484SGuojia Liao tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2437944de484SGuojia Liao tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2438374ad291SJian Shen } 2439374ad291SJian Shen 24409b2f3477SWeihang Li /* Initialize RSS indirect table */ 2441e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 24424093d1a2SGuangbin Huang rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2443944de484SGuojia Liao } 2444944de484SGuojia Liao 2445944de484SGuojia Liao static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2446944de484SGuojia Liao { 2447944de484SGuojia Liao struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2448944de484SGuojia Liao int ret; 2449944de484SGuojia Liao 2450944de484SGuojia Liao if (hdev->pdev->revision >= 0x21) { 2451944de484SGuojia Liao ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 2452944de484SGuojia Liao rss_cfg->rss_hash_key); 2453944de484SGuojia Liao if (ret) 2454944de484SGuojia Liao return ret; 2455944de484SGuojia Liao 2456944de484SGuojia Liao ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2457944de484SGuojia Liao if (ret) 2458944de484SGuojia Liao return ret; 2459944de484SGuojia Liao } 2460e2cb1decSSalil Mehta 2461e2cb1decSSalil Mehta ret = hclgevf_set_rss_indir_table(hdev); 2462e2cb1decSSalil Mehta if (ret) 2463e2cb1decSSalil Mehta return ret; 2464e2cb1decSSalil Mehta 24654093d1a2SGuangbin Huang return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2466e2cb1decSSalil Mehta } 2467e2cb1decSSalil Mehta 2468e2cb1decSSalil Mehta static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 
{
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

/* Busy-wait until the service task has observed the DOWN state: spins while
 * the LINK_UPDATING flag is set, no new service pass has completed
 * (serv_processed_cnt unchanged) and the iteration budget is not exhausted.
 */
static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
{
#define HCLGEVF_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		/* NOTE(review): usleep_range(1, 1) gives the scheduler no
		 * slack to coalesce wakeups; a real range (e.g. 1, 2) is the
		 * documented usage — confirm against timers-howto.rst.
		 */
		usleep_range(1, 1);
}

/* Enable or quiesce the periodic service task. On disable, mark the device
 * DOWN and wait for any in-flight link update in the service task to finish.
 */
static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		hclgevf_task_schedule(hdev, 0);
	} else {
		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		/* NOTE(review): smp_mb__before_atomic() is documented to be
		 * placed *before* a non-value-returning atomic op, but here
		 * it follows set_bit(); smp_mb__after_atomic() looks like the
		 * intended primitive — verify against
		 * Documentation/memory-barriers.txt.
		 */
		hclgevf_flush_link_update(hdev);
	}
}

/* ae_start hook: reset per-queue stats, ask the PF for current link info and
 * supported link modes, then clear the DOWN flag. Always succeeds.
 */
static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

/* ae_stop hook: mark the device DOWN, reset each TQP (skipped during a VF
 * reset, when the PF handles it), clear stats and report link down.
 */
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->reset_type != HNAE3_VF_RESET)
		for (i = 0; i < handle->kinfo.num_tqps; i++)
			if (hclgevf_reset_tqp(handle, i))
				break;

	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

/* Tell the PF (via mailbox, no completion wait) whether this VF is alive. */
static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
#define HCLGEVF_STATE_ALIVE	1
#define HCLGEVF_STATE_NOT_ALIVE	0

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
				   HCLGEVF_STATE_NOT_ALIVE;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

/* Client-start hook: report this VF alive to the PF. */
static int hclgevf_client_start(struct hnae3_handle *handle)
{
	int ret;

	ret = hclgevf_set_alive(handle, true);
	if (ret)
		return ret;

	return 0;
}

/* Client-stop hook: report this VF not-alive; failure is only warned about
 * since the device is going down anyway.
 */
static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);
}

/* Initialize software state: flags, service work, mailbox mutex, reset
 * semaphore and the MAC address lists; the device starts out DOWN.
 */
static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);
	sema_init(&hdev->reset_sem, 1);

	spin_lock_init(&hdev->mac_table.mac_list_lock);
	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

/* Tear down software state: mark DOWN/REMOVING, cancel the service work if
 * it was ever initialized, and destroy the mailbox mutex.
 */
static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

/* Allocate MSI/MSI-X vectors and the per-vector bookkeeping arrays.
 * With RoCE support, MSI-X only, at least roce_base_msix_offset + 1 vectors;
 * otherwise MSI or MSI-X with at least HNAE3_MIN_VECTOR_NUM.
 * Returns 0 on success or a negative errno.
 */
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_dev_roce_supported(hdev))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
						hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	/* record what was actually granted, which may be fewer than asked */
	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

/* Release the vector bookkeeping arrays and free the IRQ vectors. */
static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

/* Set up the misc (vector 0) interrupt: pick the vector, request the IRQ,
 * clear any stale event cause and enable the vector.
 * Returns 0 on success or the request_irq() error.
 */
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGEVF_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

/* Disable the misc vector, wait out in-flight handlers, then free the IRQ
 * and release the vector.
 */
static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

/* Dump a summary of this VF's configuration to the kernel log. */
static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %u\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}

/* Initialize the NIC client instance. If a reset started or completed while
 * init_instance ran (reset counter changed or RST_HANDLING set), roll back
 * and return -EBUSY so the caller retries.
 */
static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					    struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.rst_cnt;
	int ret;

	ret = client->ops->init_instance(&hdev->nic);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.rst_cnt) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		return -EBUSY;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->nic))
		hclgevf_info_show(hdev);

	return 0;
}

/* Initialize the RoCE client instance. A no-op (returns 0) until the device
 * supports RoCE and both the RoCE and NIC clients are registered.
 */
static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}

/* Register a client (KNIC or ROCE) with this ae_dev. Registering the KNIC
 * client also (re)initializes an already-registered RoCE client. On failure
 * the corresponding client pointers are cleared before returning.
 */
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = hclgevf_init_nic_client_instance(ae_dev, client);
		if (ret)
			goto clear_nic;

		ret = hclgevf_init_roce_client_instance(ae_dev,
							hdev->roce_client);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		ret = hclgevf_init_roce_client_instance(ae_dev, client);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

/* Unregister a client: always tears down a registered RoCE client first,
 * then the NIC client unless this call came from the RoCE client itself.
 */
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

/* Bring up the PCI function: enable the device, set 64-bit DMA masks, claim
 * the BARs, enable bus mastering and map BAR 2 for register access.
 * Unwinds fully on any failure and returns a negative errno.
 */
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

/* Undo hclgevf_pci_init(): unmap registers, drop bus mastering, release the
 * BARs and disable the device.
 */
static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/* Query the firmware for this VF's interrupt resources and derive
 * num_msi/num_nic_msix (and the RoCE vector split when RoCE is supported).
 * Fails with -EINVAL if fewer than HNAE3_MIN_VECTOR_NUM NIC vectors exist.
 */
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
			hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
					HCLGEVF_MSIX_OFT_ROCEE_M,
					HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
			hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
					HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* nic's msix number always equals the roce's */
		hdev->num_nic_msix = hdev->num_roce_msix;

		/* VF should have NIC vectors and Roce vectors, NIC vectors
		 * are queued before Roce vectors. The offset is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
			hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
					HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		hdev->num_nic_msix = hdev->num_msi;
	}

	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for vf(min:2).\n",
			hdev->num_nic_msix);
		return -EINVAL;
	}

	return 0;
}

/* Re-initialize interrupt resources across a reset. For a full VF reset the
 * existing vectors are torn down first; vectors are then (re)allocated only
 * if the IRQ_INITED flag is not set.
 */
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

/* Ask the PF (via mailbox, no completion wait) to clear this VF's table
 * entries from the vport list.
 */
static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
			       HCLGE_MBX_VPORT_LIST_CLEAR);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

/* Re-initialize the device after a reset: interrupt resources, command
 * queue, RSS, GRO and VLAN config, then flag promisc mode for re-apply.
 * Returns 0 on success or the first failing step's error.
 */
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	/* promisc settings must be re-applied by the service task */
	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

/* First-time device bring-up: PCI, command queue, VF resources, interrupts,
 * software state, TQPs, GRO, RSS, vport table and VLAN config, finishing by
 * scheduling the periodic service task. Unwinds via the error-label chain
 * in reverse order of initialization.
 */
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret)
		return ret;

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret)
		goto err_cmd_queue_init;

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;
	hdev->reset_type = HNAE3_NONE_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret)
		goto err_misc_irq_init;

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret)
		goto err_config;

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	hclgevf_rss_init_cfg(hdev);
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	/* ensure vf tbl list is empty before init */
	ret = hclgevf_clear_vport_list(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to clear tbl list configuration, ret = %d.\n",
			ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
		 HCLGEVF_DRIVER_NAME);

	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

/* Full device teardown: stop software state, notify the PF of VF uninit,
 * then release interrupts, PCI resources, the command queue and MAC lists.
 */
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_state_uninit(hdev);
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_mac_list(hdev);
}

/* ae_dev init hook: allocate the hclgevf device then run full bring-up. */
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	return 0;
}

/* ae_dev uninit hook: tear down the hclgevf device and drop the priv ptr. */
static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

/* Max usable queue pairs: bounded by the RSS size limit and by the TQPs
 * available per traffic class.
 */
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

/* Report the allocated TQP count and the maximum RSS size of this VF. */
static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

/* Recompute kinfo->rss_size and kinfo->num_tqps from a requested queue
 * count, clamped to the per-TC hardware maximum.
 */
static void hclgevf_update_rss_size(struct hnae3_handle *handle,
				    u32 new_tqps_num)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u16 max_rss_size;

	kinfo->req_rss_size = new_tqps_num;

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     hdev->num_tqps / kinfo->num_tc);

	/* Use the user's configuration when it is not larger than
	 * max_rss_size, otherwise, use the maximum specification value.
	 */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size)
		kinfo->rss_size = kinfo->req_rss_size;
	else if (kinfo->rss_size > max_rss_size ||
		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
		kinfo->rss_size = max_rss_size;

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
}

/* ethtool set-channels hook: apply the new RSS size to the hardware TC mode
 * and, unless the user configured the RSS indirection table themselves,
 * rebuild the indirection table for the new size.
 * Returns 0 on success or a negative errno.
 */
static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
				bool rxfh_configured)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	hclgevf_update_rss_size(handle, new_tqps_num);

	ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	hdev->rss_cfg.rss_size = kinfo->rss_size;

	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

/* Return the cached link state (nonzero when link is up). */
static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

/* Report cached speed/duplex; autoneg is always reported as disabled for a
 * VF. Any of the output pointers may be NULL.
 */
static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

/* Cache the speed/duplex reported by the PF. */
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

/* Enable or disable hardware GRO for this VF. */
static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

/* Report the cached media type (and module type) of the PF's port. Either
 * output pointer may be NULL.
 */
static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				   u8 *module_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
333388d10bd6SJian Shen *module_type = hdev->hw.mac.module_type; 3334c136b884SPeng Li } 3335c136b884SPeng Li 33364d60291bSHuazhong Tan static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 33374d60291bSHuazhong Tan { 33384d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33394d60291bSHuazhong Tan 3340aa5c4f17SHuazhong Tan return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 33414d60291bSHuazhong Tan } 33424d60291bSHuazhong Tan 33434d60291bSHuazhong Tan static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 33444d60291bSHuazhong Tan { 33454d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33464d60291bSHuazhong Tan 33474d60291bSHuazhong Tan return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 33484d60291bSHuazhong Tan } 33494d60291bSHuazhong Tan 33504d60291bSHuazhong Tan static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 33514d60291bSHuazhong Tan { 33524d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33534d60291bSHuazhong Tan 3354c88a6e7dSHuazhong Tan return hdev->rst_stats.hw_rst_done_cnt; 33554d60291bSHuazhong Tan } 33564d60291bSHuazhong Tan 33579194d18bSliuzhongzhu static void hclgevf_get_link_mode(struct hnae3_handle *handle, 33589194d18bSliuzhongzhu unsigned long *supported, 33599194d18bSliuzhongzhu unsigned long *advertising) 33609194d18bSliuzhongzhu { 33619194d18bSliuzhongzhu struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33629194d18bSliuzhongzhu 33639194d18bSliuzhongzhu *supported = hdev->hw.mac.supported; 33649194d18bSliuzhongzhu *advertising = hdev->hw.mac.advertising; 33659194d18bSliuzhongzhu } 33669194d18bSliuzhongzhu 33671600c3e5SJian Shen #define MAX_SEPARATE_NUM 4 33681600c3e5SJian Shen #define SEPARATOR_VALUE 0xFFFFFFFF 33691600c3e5SJian Shen #define REG_NUM_PER_LINE 4 33701600c3e5SJian Shen #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 33711600c3e5SJian Shen 33721600c3e5SJian Shen static int 
hclgevf_get_regs_len(struct hnae3_handle *handle) 33731600c3e5SJian Shen { 33741600c3e5SJian Shen int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 33751600c3e5SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33761600c3e5SJian Shen 33771600c3e5SJian Shen cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 33781600c3e5SJian Shen common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 33791600c3e5SJian Shen ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 33801600c3e5SJian Shen tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 33811600c3e5SJian Shen 33821600c3e5SJian Shen return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 33831600c3e5SJian Shen tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 33841600c3e5SJian Shen } 33851600c3e5SJian Shen 33861600c3e5SJian Shen static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 33871600c3e5SJian Shen void *data) 33881600c3e5SJian Shen { 33891600c3e5SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33901600c3e5SJian Shen int i, j, reg_um, separator_num; 33911600c3e5SJian Shen u32 *reg = data; 33921600c3e5SJian Shen 33931600c3e5SJian Shen *version = hdev->fw_version; 33941600c3e5SJian Shen 33951600c3e5SJian Shen /* fetching per-VF registers values from VF PCIe register space */ 33961600c3e5SJian Shen reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 33971600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 33981600c3e5SJian Shen for (i = 0; i < reg_um; i++) 33991600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 34001600c3e5SJian Shen for (i = 0; i < separator_num; i++) 34011600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 34021600c3e5SJian Shen 34031600c3e5SJian Shen reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 34041600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 34051600c3e5SJian Shen for (i 
= 0; i < reg_um; i++) 34061600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 34071600c3e5SJian Shen for (i = 0; i < separator_num; i++) 34081600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 34091600c3e5SJian Shen 34101600c3e5SJian Shen reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 34111600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 34121600c3e5SJian Shen for (j = 0; j < hdev->num_tqps; j++) { 34131600c3e5SJian Shen for (i = 0; i < reg_um; i++) 34141600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, 34151600c3e5SJian Shen ring_reg_addr_list[i] + 34161600c3e5SJian Shen 0x200 * j); 34171600c3e5SJian Shen for (i = 0; i < separator_num; i++) 34181600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 34191600c3e5SJian Shen } 34201600c3e5SJian Shen 34211600c3e5SJian Shen reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 34221600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 34231600c3e5SJian Shen for (j = 0; j < hdev->num_msi_used - 1; j++) { 34241600c3e5SJian Shen for (i = 0; i < reg_um; i++) 34251600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, 34261600c3e5SJian Shen tqp_intr_reg_addr_list[i] + 34271600c3e5SJian Shen 4 * j); 34281600c3e5SJian Shen for (i = 0; i < separator_num; i++) 34291600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 34301600c3e5SJian Shen } 34311600c3e5SJian Shen } 34321600c3e5SJian Shen 343392f11ea1SJian Shen void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 343492f11ea1SJian Shen u8 *port_base_vlan_info, u8 data_size) 343592f11ea1SJian Shen { 343692f11ea1SJian Shen struct hnae3_handle *nic = &hdev->nic; 3437d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 343892f11ea1SJian Shen 343992f11ea1SJian Shen rtnl_lock(); 344092f11ea1SJian Shen hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 344192f11ea1SJian Shen rtnl_unlock(); 344292f11ea1SJian Shen 344392f11ea1SJian Shen /* send msg to PF and wait update port based vlan info */ 
3444d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3445d3410018SYufeng Mo HCLGE_MBX_PORT_BASE_VLAN_CFG); 3446d3410018SYufeng Mo memcpy(send_msg.data, port_base_vlan_info, data_size); 3447d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 344892f11ea1SJian Shen 344992f11ea1SJian Shen if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 345092f11ea1SJian Shen nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; 345192f11ea1SJian Shen else 345292f11ea1SJian Shen nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 345392f11ea1SJian Shen 345492f11ea1SJian Shen rtnl_lock(); 345592f11ea1SJian Shen hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 345692f11ea1SJian Shen rtnl_unlock(); 345792f11ea1SJian Shen } 345892f11ea1SJian Shen 3459e2cb1decSSalil Mehta static const struct hnae3_ae_ops hclgevf_ops = { 3460e2cb1decSSalil Mehta .init_ae_dev = hclgevf_init_ae_dev, 3461e2cb1decSSalil Mehta .uninit_ae_dev = hclgevf_uninit_ae_dev, 34626ff3cf07SHuazhong Tan .flr_prepare = hclgevf_flr_prepare, 34636ff3cf07SHuazhong Tan .flr_done = hclgevf_flr_done, 3464e718a93fSPeng Li .init_client_instance = hclgevf_init_client_instance, 3465e718a93fSPeng Li .uninit_client_instance = hclgevf_uninit_client_instance, 3466e2cb1decSSalil Mehta .start = hclgevf_ae_start, 3467e2cb1decSSalil Mehta .stop = hclgevf_ae_stop, 3468a6d818e3SYunsheng Lin .client_start = hclgevf_client_start, 3469a6d818e3SYunsheng Lin .client_stop = hclgevf_client_stop, 3470e2cb1decSSalil Mehta .map_ring_to_vector = hclgevf_map_ring_to_vector, 3471e2cb1decSSalil Mehta .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3472e2cb1decSSalil Mehta .get_vector = hclgevf_get_vector, 34730d3e6631SYunsheng Lin .put_vector = hclgevf_put_vector, 3474e2cb1decSSalil Mehta .reset_queue = hclgevf_reset_tqp, 3475e2cb1decSSalil Mehta .get_mac_addr = hclgevf_get_mac_addr, 3476e2cb1decSSalil Mehta .set_mac_addr = hclgevf_set_mac_addr, 3477e2cb1decSSalil Mehta .add_uc_addr = 
hclgevf_add_uc_addr, 3478e2cb1decSSalil Mehta .rm_uc_addr = hclgevf_rm_uc_addr, 3479e2cb1decSSalil Mehta .add_mc_addr = hclgevf_add_mc_addr, 3480e2cb1decSSalil Mehta .rm_mc_addr = hclgevf_rm_mc_addr, 3481e2cb1decSSalil Mehta .get_stats = hclgevf_get_stats, 3482e2cb1decSSalil Mehta .update_stats = hclgevf_update_stats, 3483e2cb1decSSalil Mehta .get_strings = hclgevf_get_strings, 3484e2cb1decSSalil Mehta .get_sset_count = hclgevf_get_sset_count, 3485e2cb1decSSalil Mehta .get_rss_key_size = hclgevf_get_rss_key_size, 3486e2cb1decSSalil Mehta .get_rss_indir_size = hclgevf_get_rss_indir_size, 3487e2cb1decSSalil Mehta .get_rss = hclgevf_get_rss, 3488e2cb1decSSalil Mehta .set_rss = hclgevf_set_rss, 3489d97b3072SJian Shen .get_rss_tuple = hclgevf_get_rss_tuple, 3490d97b3072SJian Shen .set_rss_tuple = hclgevf_set_rss_tuple, 3491e2cb1decSSalil Mehta .get_tc_size = hclgevf_get_tc_size, 3492e2cb1decSSalil Mehta .get_fw_version = hclgevf_get_fw_version, 3493e2cb1decSSalil Mehta .set_vlan_filter = hclgevf_set_vlan_filter, 3494b2641e2aSYunsheng Lin .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 34956d4c3981SSalil Mehta .reset_event = hclgevf_reset_event, 3496720bd583SHuazhong Tan .set_default_reset_request = hclgevf_set_def_reset_request, 34974093d1a2SGuangbin Huang .set_channels = hclgevf_set_channels, 3498849e4607SPeng Li .get_channels = hclgevf_get_channels, 3499cc719218SPeng Li .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 35001600c3e5SJian Shen .get_regs_len = hclgevf_get_regs_len, 35011600c3e5SJian Shen .get_regs = hclgevf_get_regs, 3502175ec96bSFuyun Liang .get_status = hclgevf_get_status, 35034a152de9SFuyun Liang .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3504c136b884SPeng Li .get_media_type = hclgevf_get_media_type, 35054d60291bSHuazhong Tan .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 35064d60291bSHuazhong Tan .ae_dev_resetting = hclgevf_ae_dev_resetting, 35074d60291bSHuazhong Tan .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 
35085c9f6b39SPeng Li .set_gro_en = hclgevf_gro_en, 3509818f1675SYunsheng Lin .set_mtu = hclgevf_set_mtu, 35100c29d191Sliuzhongzhu .get_global_queue_id = hclgevf_get_qid_global, 35118cdb992fSJian Shen .set_timer_task = hclgevf_set_timer_task, 35129194d18bSliuzhongzhu .get_link_mode = hclgevf_get_link_mode, 3513e196ec75SJian Shen .set_promisc_mode = hclgevf_set_promisc_mode, 3514c631c696SJian Shen .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3515e2cb1decSSalil Mehta }; 3516e2cb1decSSalil Mehta 3517e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf = { 3518e2cb1decSSalil Mehta .ops = &hclgevf_ops, 3519e2cb1decSSalil Mehta .pdev_id_table = ae_algovf_pci_tbl, 3520e2cb1decSSalil Mehta }; 3521e2cb1decSSalil Mehta 3522e2cb1decSSalil Mehta static int hclgevf_init(void) 3523e2cb1decSSalil Mehta { 3524e2cb1decSSalil Mehta pr_info("%s is initializing\n", HCLGEVF_NAME); 3525e2cb1decSSalil Mehta 352616deaef2SYunsheng Lin hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); 35270ea68902SYunsheng Lin if (!hclgevf_wq) { 35280ea68902SYunsheng Lin pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 35290ea68902SYunsheng Lin return -ENOMEM; 35300ea68902SYunsheng Lin } 35310ea68902SYunsheng Lin 3532854cf33aSFuyun Liang hnae3_register_ae_algo(&ae_algovf); 3533854cf33aSFuyun Liang 3534854cf33aSFuyun Liang return 0; 3535e2cb1decSSalil Mehta } 3536e2cb1decSSalil Mehta 3537e2cb1decSSalil Mehta static void hclgevf_exit(void) 3538e2cb1decSSalil Mehta { 3539e2cb1decSSalil Mehta hnae3_unregister_ae_algo(&ae_algovf); 35400ea68902SYunsheng Lin destroy_workqueue(hclgevf_wq); 3541e2cb1decSSalil Mehta } 3542e2cb1decSSalil Mehta module_init(hclgevf_init); 3543e2cb1decSSalil Mehta module_exit(hclgevf_exit); 3544e2cb1decSSalil Mehta 3545e2cb1decSSalil Mehta MODULE_LICENSE("GPL"); 3546e2cb1decSSalil Mehta MODULE_AUTHOR("Huawei Tech. 
Co., Ltd."); 3547e2cb1decSSalil Mehta MODULE_DESCRIPTION("HCLGEVF Driver"); 3548e2cb1decSSalil Mehta MODULE_VERSION(HCLGEVF_MOD_VERSION); 3549