1e2cb1decSSalil Mehta // SPDX-License-Identifier: GPL-2.0+ 2e2cb1decSSalil Mehta // Copyright (c) 2016-2017 Hisilicon Limited. 3e2cb1decSSalil Mehta 4e2cb1decSSalil Mehta #include <linux/etherdevice.h> 5aa5c4f17SHuazhong Tan #include <linux/iopoll.h> 66988eb2aSSalil Mehta #include <net/rtnetlink.h> 7e2cb1decSSalil Mehta #include "hclgevf_cmd.h" 8e2cb1decSSalil Mehta #include "hclgevf_main.h" 9e2cb1decSSalil Mehta #include "hclge_mbx.h" 10e2cb1decSSalil Mehta #include "hnae3.h" 11e2cb1decSSalil Mehta 12e2cb1decSSalil Mehta #define HCLGEVF_NAME "hclgevf" 13e2cb1decSSalil Mehta 14bbe6540eSHuazhong Tan #define HCLGEVF_RESET_MAX_FAIL_CNT 5 15bbe6540eSHuazhong Tan 169c6f7085SHuazhong Tan static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 17e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf; 18e2cb1decSSalil Mehta 190ea68902SYunsheng Lin static struct workqueue_struct *hclgevf_wq; 200ea68902SYunsheng Lin 21e2cb1decSSalil Mehta static const struct pci_device_id ae_algovf_pci_tbl[] = { 22e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 23e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 24e2cb1decSSalil Mehta /* required last entry */ 25e2cb1decSSalil Mehta {0, } 26e2cb1decSSalil Mehta }; 27e2cb1decSSalil Mehta 28472d7eceSJian Shen static const u8 hclgevf_hash_key[] = { 29472d7eceSJian Shen 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 30472d7eceSJian Shen 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 31472d7eceSJian Shen 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 32472d7eceSJian Shen 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 33472d7eceSJian Shen 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 34472d7eceSJian Shen }; 35472d7eceSJian Shen 362f550a46SYunsheng Lin MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 372f550a46SYunsheng Lin 381600c3e5SJian Shen static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 391600c3e5SJian Shen HCLGEVF_CMDQ_TX_ADDR_H_REG, 401600c3e5SJian Shen 
HCLGEVF_CMDQ_TX_DEPTH_REG, 411600c3e5SJian Shen HCLGEVF_CMDQ_TX_TAIL_REG, 421600c3e5SJian Shen HCLGEVF_CMDQ_TX_HEAD_REG, 431600c3e5SJian Shen HCLGEVF_CMDQ_RX_ADDR_L_REG, 441600c3e5SJian Shen HCLGEVF_CMDQ_RX_ADDR_H_REG, 451600c3e5SJian Shen HCLGEVF_CMDQ_RX_DEPTH_REG, 461600c3e5SJian Shen HCLGEVF_CMDQ_RX_TAIL_REG, 471600c3e5SJian Shen HCLGEVF_CMDQ_RX_HEAD_REG, 481600c3e5SJian Shen HCLGEVF_VECTOR0_CMDQ_SRC_REG, 499cee2e8dSHuazhong Tan HCLGEVF_VECTOR0_CMDQ_STATE_REG, 501600c3e5SJian Shen HCLGEVF_CMDQ_INTR_EN_REG, 511600c3e5SJian Shen HCLGEVF_CMDQ_INTR_GEN_REG}; 521600c3e5SJian Shen 531600c3e5SJian Shen static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 541600c3e5SJian Shen HCLGEVF_RST_ING, 551600c3e5SJian Shen HCLGEVF_GRO_EN_REG}; 561600c3e5SJian Shen 571600c3e5SJian Shen static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 581600c3e5SJian Shen HCLGEVF_RING_RX_ADDR_H_REG, 591600c3e5SJian Shen HCLGEVF_RING_RX_BD_NUM_REG, 601600c3e5SJian Shen HCLGEVF_RING_RX_BD_LENGTH_REG, 611600c3e5SJian Shen HCLGEVF_RING_RX_MERGE_EN_REG, 621600c3e5SJian Shen HCLGEVF_RING_RX_TAIL_REG, 631600c3e5SJian Shen HCLGEVF_RING_RX_HEAD_REG, 641600c3e5SJian Shen HCLGEVF_RING_RX_FBD_NUM_REG, 651600c3e5SJian Shen HCLGEVF_RING_RX_OFFSET_REG, 661600c3e5SJian Shen HCLGEVF_RING_RX_FBD_OFFSET_REG, 671600c3e5SJian Shen HCLGEVF_RING_RX_STASH_REG, 681600c3e5SJian Shen HCLGEVF_RING_RX_BD_ERR_REG, 691600c3e5SJian Shen HCLGEVF_RING_TX_ADDR_L_REG, 701600c3e5SJian Shen HCLGEVF_RING_TX_ADDR_H_REG, 711600c3e5SJian Shen HCLGEVF_RING_TX_BD_NUM_REG, 721600c3e5SJian Shen HCLGEVF_RING_TX_PRIORITY_REG, 731600c3e5SJian Shen HCLGEVF_RING_TX_TC_REG, 741600c3e5SJian Shen HCLGEVF_RING_TX_MERGE_EN_REG, 751600c3e5SJian Shen HCLGEVF_RING_TX_TAIL_REG, 761600c3e5SJian Shen HCLGEVF_RING_TX_HEAD_REG, 771600c3e5SJian Shen HCLGEVF_RING_TX_FBD_NUM_REG, 781600c3e5SJian Shen HCLGEVF_RING_TX_OFFSET_REG, 791600c3e5SJian Shen HCLGEVF_RING_TX_EBD_NUM_REG, 801600c3e5SJian Shen 
HCLGEVF_RING_TX_EBD_OFFSET_REG, 811600c3e5SJian Shen HCLGEVF_RING_TX_BD_ERR_REG, 821600c3e5SJian Shen HCLGEVF_RING_EN_REG}; 831600c3e5SJian Shen 841600c3e5SJian Shen static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 851600c3e5SJian Shen HCLGEVF_TQP_INTR_GL0_REG, 861600c3e5SJian Shen HCLGEVF_TQP_INTR_GL1_REG, 871600c3e5SJian Shen HCLGEVF_TQP_INTR_GL2_REG, 881600c3e5SJian Shen HCLGEVF_TQP_INTR_RL_REG}; 891600c3e5SJian Shen 909b2f3477SWeihang Li static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 91e2cb1decSSalil Mehta { 92eed9535fSPeng Li if (!handle->client) 93eed9535fSPeng Li return container_of(handle, struct hclgevf_dev, nic); 94eed9535fSPeng Li else if (handle->client->type == HNAE3_CLIENT_ROCE) 95eed9535fSPeng Li return container_of(handle, struct hclgevf_dev, roce); 96eed9535fSPeng Li else 97e2cb1decSSalil Mehta return container_of(handle, struct hclgevf_dev, nic); 98e2cb1decSSalil Mehta } 99e2cb1decSSalil Mehta 100e2cb1decSSalil Mehta static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 101e2cb1decSSalil Mehta { 102b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 103e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 104e2cb1decSSalil Mehta struct hclgevf_desc desc; 105e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 106e2cb1decSSalil Mehta int status; 107e2cb1decSSalil Mehta int i; 108e2cb1decSSalil Mehta 109b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 110b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 111e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, 112e2cb1decSSalil Mehta HCLGEVF_OPC_QUERY_RX_STATUS, 113e2cb1decSSalil Mehta true); 114e2cb1decSSalil Mehta 115e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 116e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 117e2cb1decSSalil Mehta if (status) { 118e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 
119e2cb1decSSalil Mehta "Query tqp stat fail, status = %d,queue = %d\n", 120e2cb1decSSalil Mehta status, i); 121e2cb1decSSalil Mehta return status; 122e2cb1decSSalil Mehta } 123e2cb1decSSalil Mehta tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 124cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 125e2cb1decSSalil Mehta 126e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 127e2cb1decSSalil Mehta true); 128e2cb1decSSalil Mehta 129e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 130e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 131e2cb1decSSalil Mehta if (status) { 132e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 133e2cb1decSSalil Mehta "Query tqp stat fail, status = %d,queue = %d\n", 134e2cb1decSSalil Mehta status, i); 135e2cb1decSSalil Mehta return status; 136e2cb1decSSalil Mehta } 137e2cb1decSSalil Mehta tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 138cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 139e2cb1decSSalil Mehta } 140e2cb1decSSalil Mehta 141e2cb1decSSalil Mehta return 0; 142e2cb1decSSalil Mehta } 143e2cb1decSSalil Mehta 144e2cb1decSSalil Mehta static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 145e2cb1decSSalil Mehta { 146e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo = &handle->kinfo; 147e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 148e2cb1decSSalil Mehta u64 *buff = data; 149e2cb1decSSalil Mehta int i; 150e2cb1decSSalil Mehta 151b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 152b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 153e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 154e2cb1decSSalil Mehta } 155e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 156b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 157e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 158e2cb1decSSalil Mehta } 159e2cb1decSSalil Mehta 160e2cb1decSSalil Mehta return 
buff; 161e2cb1decSSalil Mehta } 162e2cb1decSSalil Mehta 163e2cb1decSSalil Mehta static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 164e2cb1decSSalil Mehta { 165b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 166e2cb1decSSalil Mehta 167b4f1d303SJian Shen return kinfo->num_tqps * 2; 168e2cb1decSSalil Mehta } 169e2cb1decSSalil Mehta 170e2cb1decSSalil Mehta static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 171e2cb1decSSalil Mehta { 172b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 173e2cb1decSSalil Mehta u8 *buff = data; 174e2cb1decSSalil Mehta int i = 0; 175e2cb1decSSalil Mehta 176b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 177b4f1d303SJian Shen struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 178e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1790c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 180e2cb1decSSalil Mehta tqp->index); 181e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 182e2cb1decSSalil Mehta } 183e2cb1decSSalil Mehta 184b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 185b4f1d303SJian Shen struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 186e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1870c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 188e2cb1decSSalil Mehta tqp->index); 189e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 190e2cb1decSSalil Mehta } 191e2cb1decSSalil Mehta 192e2cb1decSSalil Mehta return buff; 193e2cb1decSSalil Mehta } 194e2cb1decSSalil Mehta 195e2cb1decSSalil Mehta static void hclgevf_update_stats(struct hnae3_handle *handle, 196e2cb1decSSalil Mehta struct net_device_stats *net_stats) 197e2cb1decSSalil Mehta { 198e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 199e2cb1decSSalil Mehta int status; 200e2cb1decSSalil Mehta 201e2cb1decSSalil Mehta status = hclgevf_tqps_update_stats(handle); 202e2cb1decSSalil Mehta if (status) 
203e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 204e2cb1decSSalil Mehta "VF update of TQPS stats fail, status = %d.\n", 205e2cb1decSSalil Mehta status); 206e2cb1decSSalil Mehta } 207e2cb1decSSalil Mehta 208e2cb1decSSalil Mehta static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 209e2cb1decSSalil Mehta { 210e2cb1decSSalil Mehta if (strset == ETH_SS_TEST) 211e2cb1decSSalil Mehta return -EOPNOTSUPP; 212e2cb1decSSalil Mehta else if (strset == ETH_SS_STATS) 213e2cb1decSSalil Mehta return hclgevf_tqps_get_sset_count(handle, strset); 214e2cb1decSSalil Mehta 215e2cb1decSSalil Mehta return 0; 216e2cb1decSSalil Mehta } 217e2cb1decSSalil Mehta 218e2cb1decSSalil Mehta static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 219e2cb1decSSalil Mehta u8 *data) 220e2cb1decSSalil Mehta { 221e2cb1decSSalil Mehta u8 *p = (char *)data; 222e2cb1decSSalil Mehta 223e2cb1decSSalil Mehta if (strset == ETH_SS_STATS) 224e2cb1decSSalil Mehta p = hclgevf_tqps_get_strings(handle, p); 225e2cb1decSSalil Mehta } 226e2cb1decSSalil Mehta 227e2cb1decSSalil Mehta static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 228e2cb1decSSalil Mehta { 229e2cb1decSSalil Mehta hclgevf_tqps_get_stats(handle, data); 230e2cb1decSSalil Mehta } 231e2cb1decSSalil Mehta 232d3410018SYufeng Mo static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, 233d3410018SYufeng Mo u8 subcode) 234d3410018SYufeng Mo { 235d3410018SYufeng Mo if (msg) { 236d3410018SYufeng Mo memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); 237d3410018SYufeng Mo msg->code = code; 238d3410018SYufeng Mo msg->subcode = subcode; 239d3410018SYufeng Mo } 240d3410018SYufeng Mo } 241d3410018SYufeng Mo 242e2cb1decSSalil Mehta static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 243e2cb1decSSalil Mehta { 244d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 245e2cb1decSSalil Mehta u8 resp_msg; 246e2cb1decSSalil Mehta int status; 247e2cb1decSSalil Mehta 248d3410018SYufeng Mo 
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0); 249d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 250d3410018SYufeng Mo sizeof(resp_msg)); 251e2cb1decSSalil Mehta if (status) { 252e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 253e2cb1decSSalil Mehta "VF request to get TC info from PF failed %d", 254e2cb1decSSalil Mehta status); 255e2cb1decSSalil Mehta return status; 256e2cb1decSSalil Mehta } 257e2cb1decSSalil Mehta 258e2cb1decSSalil Mehta hdev->hw_tc_map = resp_msg; 259e2cb1decSSalil Mehta 260e2cb1decSSalil Mehta return 0; 261e2cb1decSSalil Mehta } 262e2cb1decSSalil Mehta 26392f11ea1SJian Shen static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 26492f11ea1SJian Shen { 26592f11ea1SJian Shen struct hnae3_handle *nic = &hdev->nic; 266d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 26792f11ea1SJian Shen u8 resp_msg; 26892f11ea1SJian Shen int ret; 26992f11ea1SJian Shen 270d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 271d3410018SYufeng Mo HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); 272d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 273d3410018SYufeng Mo sizeof(u8)); 27492f11ea1SJian Shen if (ret) { 27592f11ea1SJian Shen dev_err(&hdev->pdev->dev, 27692f11ea1SJian Shen "VF request to get port based vlan state failed %d", 27792f11ea1SJian Shen ret); 27892f11ea1SJian Shen return ret; 27992f11ea1SJian Shen } 28092f11ea1SJian Shen 28192f11ea1SJian Shen nic->port_base_vlan_state = resp_msg; 28292f11ea1SJian Shen 28392f11ea1SJian Shen return 0; 28492f11ea1SJian Shen } 28592f11ea1SJian Shen 2866cee6fc3SJian Shen static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 287e2cb1decSSalil Mehta { 288c0425944SPeng Li #define HCLGEVF_TQPS_RSS_INFO_LEN 6 289d3410018SYufeng Mo #define HCLGEVF_TQPS_ALLOC_OFFSET 0 290d3410018SYufeng Mo #define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 291d3410018SYufeng Mo #define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 
292d3410018SYufeng Mo 293e2cb1decSSalil Mehta u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 294d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 295e2cb1decSSalil Mehta int status; 296e2cb1decSSalil Mehta 297d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); 298d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 299e2cb1decSSalil Mehta HCLGEVF_TQPS_RSS_INFO_LEN); 300e2cb1decSSalil Mehta if (status) { 301e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 302e2cb1decSSalil Mehta "VF request to get tqp info from PF failed %d", 303e2cb1decSSalil Mehta status); 304e2cb1decSSalil Mehta return status; 305e2cb1decSSalil Mehta } 306e2cb1decSSalil Mehta 307d3410018SYufeng Mo memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], 308d3410018SYufeng Mo sizeof(u16)); 309d3410018SYufeng Mo memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], 310d3410018SYufeng Mo sizeof(u16)); 311d3410018SYufeng Mo memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], 312d3410018SYufeng Mo sizeof(u16)); 313c0425944SPeng Li 314c0425944SPeng Li return 0; 315c0425944SPeng Li } 316c0425944SPeng Li 317c0425944SPeng Li static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 318c0425944SPeng Li { 319c0425944SPeng Li #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 320d3410018SYufeng Mo #define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 321d3410018SYufeng Mo #define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 322d3410018SYufeng Mo 323c0425944SPeng Li u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 324d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 325c0425944SPeng Li int ret; 326c0425944SPeng Li 327d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); 328d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 329c0425944SPeng Li HCLGEVF_TQPS_DEPTH_INFO_LEN); 330c0425944SPeng Li if (ret) { 331c0425944SPeng Li dev_err(&hdev->pdev->dev, 332c0425944SPeng Li "VF request to get tqp depth info 
from PF failed %d", 333c0425944SPeng Li ret); 334c0425944SPeng Li return ret; 335c0425944SPeng Li } 336c0425944SPeng Li 337d3410018SYufeng Mo memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], 338d3410018SYufeng Mo sizeof(u16)); 339d3410018SYufeng Mo memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], 340d3410018SYufeng Mo sizeof(u16)); 341e2cb1decSSalil Mehta 342e2cb1decSSalil Mehta return 0; 343e2cb1decSSalil Mehta } 344e2cb1decSSalil Mehta 3450c29d191Sliuzhongzhu static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 3460c29d191Sliuzhongzhu { 3470c29d191Sliuzhongzhu struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 348d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 3490c29d191Sliuzhongzhu u16 qid_in_pf = 0; 350d3410018SYufeng Mo u8 resp_data[2]; 3510c29d191Sliuzhongzhu int ret; 3520c29d191Sliuzhongzhu 353d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); 354d3410018SYufeng Mo memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 355d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, 35663cbf7a9SYufeng Mo sizeof(resp_data)); 3570c29d191Sliuzhongzhu if (!ret) 3580c29d191Sliuzhongzhu qid_in_pf = *(u16 *)resp_data; 3590c29d191Sliuzhongzhu 3600c29d191Sliuzhongzhu return qid_in_pf; 3610c29d191Sliuzhongzhu } 3620c29d191Sliuzhongzhu 3639c3e7130Sliuzhongzhu static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 3649c3e7130Sliuzhongzhu { 365d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 36688d10bd6SJian Shen u8 resp_msg[2]; 3679c3e7130Sliuzhongzhu int ret; 3689c3e7130Sliuzhongzhu 369d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); 370d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 371d3410018SYufeng Mo sizeof(resp_msg)); 3729c3e7130Sliuzhongzhu if (ret) { 3739c3e7130Sliuzhongzhu dev_err(&hdev->pdev->dev, 3749c3e7130Sliuzhongzhu "VF request to get the pf port media 
type failed %d", 3759c3e7130Sliuzhongzhu ret); 3769c3e7130Sliuzhongzhu return ret; 3779c3e7130Sliuzhongzhu } 3789c3e7130Sliuzhongzhu 37988d10bd6SJian Shen hdev->hw.mac.media_type = resp_msg[0]; 38088d10bd6SJian Shen hdev->hw.mac.module_type = resp_msg[1]; 3819c3e7130Sliuzhongzhu 3829c3e7130Sliuzhongzhu return 0; 3839c3e7130Sliuzhongzhu } 3849c3e7130Sliuzhongzhu 385e2cb1decSSalil Mehta static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 386e2cb1decSSalil Mehta { 387e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 388e2cb1decSSalil Mehta int i; 389e2cb1decSSalil Mehta 390e2cb1decSSalil Mehta hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 391e2cb1decSSalil Mehta sizeof(struct hclgevf_tqp), GFP_KERNEL); 392e2cb1decSSalil Mehta if (!hdev->htqp) 393e2cb1decSSalil Mehta return -ENOMEM; 394e2cb1decSSalil Mehta 395e2cb1decSSalil Mehta tqp = hdev->htqp; 396e2cb1decSSalil Mehta 397e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; i++) { 398e2cb1decSSalil Mehta tqp->dev = &hdev->pdev->dev; 399e2cb1decSSalil Mehta tqp->index = i; 400e2cb1decSSalil Mehta 401e2cb1decSSalil Mehta tqp->q.ae_algo = &ae_algovf; 402e2cb1decSSalil Mehta tqp->q.buf_size = hdev->rx_buf_len; 403c0425944SPeng Li tqp->q.tx_desc_num = hdev->num_tx_desc; 404c0425944SPeng Li tqp->q.rx_desc_num = hdev->num_rx_desc; 405e2cb1decSSalil Mehta tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 406e2cb1decSSalil Mehta i * HCLGEVF_TQP_REG_SIZE; 407e2cb1decSSalil Mehta 408e2cb1decSSalil Mehta tqp++; 409e2cb1decSSalil Mehta } 410e2cb1decSSalil Mehta 411e2cb1decSSalil Mehta return 0; 412e2cb1decSSalil Mehta } 413e2cb1decSSalil Mehta 414e2cb1decSSalil Mehta static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 415e2cb1decSSalil Mehta { 416e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 417e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo; 418e2cb1decSSalil Mehta u16 new_tqps = hdev->num_tqps; 419ebaf1908SWeihang Li unsigned int i; 420e2cb1decSSalil Mehta 421e2cb1decSSalil 
Mehta kinfo = &nic->kinfo; 422e2cb1decSSalil Mehta kinfo->num_tc = 0; 423c0425944SPeng Li kinfo->num_tx_desc = hdev->num_tx_desc; 424c0425944SPeng Li kinfo->num_rx_desc = hdev->num_rx_desc; 425e2cb1decSSalil Mehta kinfo->rx_buf_len = hdev->rx_buf_len; 426e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 427e2cb1decSSalil Mehta if (hdev->hw_tc_map & BIT(i)) 428e2cb1decSSalil Mehta kinfo->num_tc++; 429e2cb1decSSalil Mehta 430e2cb1decSSalil Mehta kinfo->rss_size 431e2cb1decSSalil Mehta = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 432e2cb1decSSalil Mehta new_tqps = kinfo->rss_size * kinfo->num_tc; 433e2cb1decSSalil Mehta kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 434e2cb1decSSalil Mehta 435e2cb1decSSalil Mehta kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 436e2cb1decSSalil Mehta sizeof(struct hnae3_queue *), GFP_KERNEL); 437e2cb1decSSalil Mehta if (!kinfo->tqp) 438e2cb1decSSalil Mehta return -ENOMEM; 439e2cb1decSSalil Mehta 440e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 441e2cb1decSSalil Mehta hdev->htqp[i].q.handle = &hdev->nic; 442e2cb1decSSalil Mehta hdev->htqp[i].q.tqp_index = i; 443e2cb1decSSalil Mehta kinfo->tqp[i] = &hdev->htqp[i].q; 444e2cb1decSSalil Mehta } 445e2cb1decSSalil Mehta 446580a05f9SYonglong Liu /* after init the max rss_size and tqps, adjust the default tqp numbers 447580a05f9SYonglong Liu * and rss size with the actual vector numbers 448580a05f9SYonglong Liu */ 449580a05f9SYonglong Liu kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 450580a05f9SYonglong Liu kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc, 451580a05f9SYonglong Liu kinfo->rss_size); 452580a05f9SYonglong Liu 453e2cb1decSSalil Mehta return 0; 454e2cb1decSSalil Mehta } 455e2cb1decSSalil Mehta 456e2cb1decSSalil Mehta static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 457e2cb1decSSalil Mehta { 458d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 459e2cb1decSSalil Mehta 
int status; 460e2cb1decSSalil Mehta 461d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); 462d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 463e2cb1decSSalil Mehta if (status) 464e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 465e2cb1decSSalil Mehta "VF failed to fetch link status(%d) from PF", status); 466e2cb1decSSalil Mehta } 467e2cb1decSSalil Mehta 468e2cb1decSSalil Mehta void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 469e2cb1decSSalil Mehta { 47045e92b7eSPeng Li struct hnae3_handle *rhandle = &hdev->roce; 471e2cb1decSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 47245e92b7eSPeng Li struct hnae3_client *rclient; 473e2cb1decSSalil Mehta struct hnae3_client *client; 474e2cb1decSSalil Mehta 475ff200099SYunsheng Lin if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 476ff200099SYunsheng Lin return; 477ff200099SYunsheng Lin 478e2cb1decSSalil Mehta client = handle->client; 47945e92b7eSPeng Li rclient = hdev->roce_client; 480e2cb1decSSalil Mehta 481582d37bbSPeng Li link_state = 482582d37bbSPeng Li test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; 483582d37bbSPeng Li 484e2cb1decSSalil Mehta if (link_state != hdev->hw.mac.link) { 485e2cb1decSSalil Mehta client->ops->link_status_change(handle, !!link_state); 48645e92b7eSPeng Li if (rclient && rclient->ops->link_status_change) 48745e92b7eSPeng Li rclient->ops->link_status_change(rhandle, !!link_state); 488e2cb1decSSalil Mehta hdev->hw.mac.link = link_state; 489e2cb1decSSalil Mehta } 490ff200099SYunsheng Lin 491ff200099SYunsheng Lin clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 492e2cb1decSSalil Mehta } 493e2cb1decSSalil Mehta 494538abaf3SYueHaibing static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 4959194d18bSliuzhongzhu { 4969194d18bSliuzhongzhu #define HCLGEVF_ADVERTISING 0 4979194d18bSliuzhongzhu #define HCLGEVF_SUPPORTED 1 4989194d18bSliuzhongzhu 499d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 500d3410018SYufeng Mo 501d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 502d3410018SYufeng Mo send_msg.data[0] = HCLGEVF_ADVERTISING; 503d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 504d3410018SYufeng Mo send_msg.data[0] = HCLGEVF_SUPPORTED; 505d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 5069194d18bSliuzhongzhu } 5079194d18bSliuzhongzhu 508e2cb1decSSalil Mehta static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 509e2cb1decSSalil Mehta { 510e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 511e2cb1decSSalil Mehta int ret; 512e2cb1decSSalil Mehta 513e2cb1decSSalil Mehta nic->ae_algo = &ae_algovf; 514e2cb1decSSalil Mehta nic->pdev = hdev->pdev; 515e2cb1decSSalil Mehta nic->numa_node_mask = hdev->numa_node_mask; 516424eb834SSalil Mehta nic->flags |= HNAE3_SUPPORT_VF; 517e2cb1decSSalil Mehta 518e2cb1decSSalil Mehta ret = hclgevf_knic_setup(hdev); 519e2cb1decSSalil Mehta if (ret) 520e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 521e2cb1decSSalil Mehta ret); 522e2cb1decSSalil Mehta return 
ret; 523e2cb1decSSalil Mehta } 524e2cb1decSSalil Mehta 525e2cb1decSSalil Mehta static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 526e2cb1decSSalil Mehta { 52736cbbdf6SPeng Li if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 52836cbbdf6SPeng Li dev_warn(&hdev->pdev->dev, 52936cbbdf6SPeng Li "vector(vector_id %d) has been freed.\n", vector_id); 53036cbbdf6SPeng Li return; 53136cbbdf6SPeng Li } 53236cbbdf6SPeng Li 533e2cb1decSSalil Mehta hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 534e2cb1decSSalil Mehta hdev->num_msi_left += 1; 535e2cb1decSSalil Mehta hdev->num_msi_used -= 1; 536e2cb1decSSalil Mehta } 537e2cb1decSSalil Mehta 538e2cb1decSSalil Mehta static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 539e2cb1decSSalil Mehta struct hnae3_vector_info *vector_info) 540e2cb1decSSalil Mehta { 541e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 542e2cb1decSSalil Mehta struct hnae3_vector_info *vector = vector_info; 543e2cb1decSSalil Mehta int alloc = 0; 544e2cb1decSSalil Mehta int i, j; 545e2cb1decSSalil Mehta 546580a05f9SYonglong Liu vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 547e2cb1decSSalil Mehta vector_num = min(hdev->num_msi_left, vector_num); 548e2cb1decSSalil Mehta 549e2cb1decSSalil Mehta for (j = 0; j < vector_num; j++) { 550e2cb1decSSalil Mehta for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 551e2cb1decSSalil Mehta if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 552e2cb1decSSalil Mehta vector->vector = pci_irq_vector(hdev->pdev, i); 553e2cb1decSSalil Mehta vector->io_addr = hdev->hw.io_base + 554e2cb1decSSalil Mehta HCLGEVF_VECTOR_REG_BASE + 555e2cb1decSSalil Mehta (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 556e2cb1decSSalil Mehta hdev->vector_status[i] = 0; 557e2cb1decSSalil Mehta hdev->vector_irq[i] = vector->vector; 558e2cb1decSSalil Mehta 559e2cb1decSSalil Mehta vector++; 560e2cb1decSSalil Mehta alloc++; 561e2cb1decSSalil Mehta 
562e2cb1decSSalil Mehta break; 563e2cb1decSSalil Mehta } 564e2cb1decSSalil Mehta } 565e2cb1decSSalil Mehta } 566e2cb1decSSalil Mehta hdev->num_msi_left -= alloc; 567e2cb1decSSalil Mehta hdev->num_msi_used += alloc; 568e2cb1decSSalil Mehta 569e2cb1decSSalil Mehta return alloc; 570e2cb1decSSalil Mehta } 571e2cb1decSSalil Mehta 572e2cb1decSSalil Mehta static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 573e2cb1decSSalil Mehta { 574e2cb1decSSalil Mehta int i; 575e2cb1decSSalil Mehta 576e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++) 577e2cb1decSSalil Mehta if (vector == hdev->vector_irq[i]) 578e2cb1decSSalil Mehta return i; 579e2cb1decSSalil Mehta 580e2cb1decSSalil Mehta return -EINVAL; 581e2cb1decSSalil Mehta } 582e2cb1decSSalil Mehta 583374ad291SJian Shen static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 584374ad291SJian Shen const u8 hfunc, const u8 *key) 585374ad291SJian Shen { 586374ad291SJian Shen struct hclgevf_rss_config_cmd *req; 587ebaf1908SWeihang Li unsigned int key_offset = 0; 588374ad291SJian Shen struct hclgevf_desc desc; 5893caf772bSYufeng Mo int key_counts; 590374ad291SJian Shen int key_size; 591374ad291SJian Shen int ret; 592374ad291SJian Shen 5933caf772bSYufeng Mo key_counts = HCLGEVF_RSS_KEY_SIZE; 594374ad291SJian Shen req = (struct hclgevf_rss_config_cmd *)desc.data; 595374ad291SJian Shen 5963caf772bSYufeng Mo while (key_counts) { 597374ad291SJian Shen hclgevf_cmd_setup_basic_desc(&desc, 598374ad291SJian Shen HCLGEVF_OPC_RSS_GENERIC_CONFIG, 599374ad291SJian Shen false); 600374ad291SJian Shen 601374ad291SJian Shen req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 602374ad291SJian Shen req->hash_config |= 603374ad291SJian Shen (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 604374ad291SJian Shen 6053caf772bSYufeng Mo key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); 606374ad291SJian Shen memcpy(req->hash_key, 607374ad291SJian Shen key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 
608374ad291SJian Shen 6093caf772bSYufeng Mo key_counts -= key_size; 6103caf772bSYufeng Mo key_offset++; 611374ad291SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 612374ad291SJian Shen if (ret) { 613374ad291SJian Shen dev_err(&hdev->pdev->dev, 614374ad291SJian Shen "Configure RSS config fail, status = %d\n", 615374ad291SJian Shen ret); 616374ad291SJian Shen return ret; 617374ad291SJian Shen } 618374ad291SJian Shen } 619374ad291SJian Shen 620374ad291SJian Shen return 0; 621374ad291SJian Shen } 622374ad291SJian Shen 623e2cb1decSSalil Mehta static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 624e2cb1decSSalil Mehta { 625e2cb1decSSalil Mehta return HCLGEVF_RSS_KEY_SIZE; 626e2cb1decSSalil Mehta } 627e2cb1decSSalil Mehta 628e2cb1decSSalil Mehta static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 629e2cb1decSSalil Mehta { 630e2cb1decSSalil Mehta return HCLGEVF_RSS_IND_TBL_SIZE; 631e2cb1decSSalil Mehta } 632e2cb1decSSalil Mehta 633e2cb1decSSalil Mehta static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 634e2cb1decSSalil Mehta { 635e2cb1decSSalil Mehta const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 636e2cb1decSSalil Mehta struct hclgevf_rss_indirection_table_cmd *req; 637e2cb1decSSalil Mehta struct hclgevf_desc desc; 638e2cb1decSSalil Mehta int status; 639e2cb1decSSalil Mehta int i, j; 640e2cb1decSSalil Mehta 641e2cb1decSSalil Mehta req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 642e2cb1decSSalil Mehta 643e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 644e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 645e2cb1decSSalil Mehta false); 646e2cb1decSSalil Mehta req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 647e2cb1decSSalil Mehta req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 648e2cb1decSSalil Mehta for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 649e2cb1decSSalil Mehta req->rss_result[j] = 650e2cb1decSSalil Mehta indir[i * 
HCLGEVF_RSS_CFG_TBL_SIZE + j]; 651e2cb1decSSalil Mehta 652e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 653e2cb1decSSalil Mehta if (status) { 654e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 655e2cb1decSSalil Mehta "VF failed(=%d) to set RSS indirection table\n", 656e2cb1decSSalil Mehta status); 657e2cb1decSSalil Mehta return status; 658e2cb1decSSalil Mehta } 659e2cb1decSSalil Mehta } 660e2cb1decSSalil Mehta 661e2cb1decSSalil Mehta return 0; 662e2cb1decSSalil Mehta } 663e2cb1decSSalil Mehta 664e2cb1decSSalil Mehta static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 665e2cb1decSSalil Mehta { 666e2cb1decSSalil Mehta struct hclgevf_rss_tc_mode_cmd *req; 667e2cb1decSSalil Mehta u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 668e2cb1decSSalil Mehta u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 669e2cb1decSSalil Mehta u16 tc_size[HCLGEVF_MAX_TC_NUM]; 670e2cb1decSSalil Mehta struct hclgevf_desc desc; 671e2cb1decSSalil Mehta u16 roundup_size; 672ebaf1908SWeihang Li unsigned int i; 6732adb8187SHuazhong Tan int status; 674e2cb1decSSalil Mehta 675e2cb1decSSalil Mehta req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 676e2cb1decSSalil Mehta 677e2cb1decSSalil Mehta roundup_size = roundup_pow_of_two(rss_size); 678e2cb1decSSalil Mehta roundup_size = ilog2(roundup_size); 679e2cb1decSSalil Mehta 680e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 681e2cb1decSSalil Mehta tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 682e2cb1decSSalil Mehta tc_size[i] = roundup_size; 683e2cb1decSSalil Mehta tc_offset[i] = rss_size * i; 684e2cb1decSSalil Mehta } 685e2cb1decSSalil Mehta 686e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 687e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 688e4e87715SPeng Li hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 689e2cb1decSSalil Mehta (tc_valid[i] & 0x1)); 690e4e87715SPeng Li hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 691e2cb1decSSalil 
Mehta HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 692e4e87715SPeng Li hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 693e2cb1decSSalil Mehta HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 694e2cb1decSSalil Mehta } 695e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 696e2cb1decSSalil Mehta if (status) 697e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 698e2cb1decSSalil Mehta "VF failed(=%d) to set rss tc mode\n", status); 699e2cb1decSSalil Mehta 700e2cb1decSSalil Mehta return status; 701e2cb1decSSalil Mehta } 702e2cb1decSSalil Mehta 703a638b1d8SJian Shen /* for revision 0x20, vf shared the same rss config with pf */ 704a638b1d8SJian Shen static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 705a638b1d8SJian Shen { 706a638b1d8SJian Shen #define HCLGEVF_RSS_MBX_RESP_LEN 8 707a638b1d8SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 708a638b1d8SJian Shen u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 709d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 710a638b1d8SJian Shen u16 msg_num, hash_key_index; 711a638b1d8SJian Shen u8 index; 712a638b1d8SJian Shen int ret; 713a638b1d8SJian Shen 714d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 715a638b1d8SJian Shen msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 716a638b1d8SJian Shen HCLGEVF_RSS_MBX_RESP_LEN; 717a638b1d8SJian Shen for (index = 0; index < msg_num; index++) { 718d3410018SYufeng Mo send_msg.data[0] = index; 719d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 720a638b1d8SJian Shen HCLGEVF_RSS_MBX_RESP_LEN); 721a638b1d8SJian Shen if (ret) { 722a638b1d8SJian Shen dev_err(&hdev->pdev->dev, 723a638b1d8SJian Shen "VF get rss hash key from PF failed, ret=%d", 724a638b1d8SJian Shen ret); 725a638b1d8SJian Shen return ret; 726a638b1d8SJian Shen } 727a638b1d8SJian Shen 728a638b1d8SJian Shen hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 729a638b1d8SJian Shen if (index == msg_num - 1) 730a638b1d8SJian Shen 
memcpy(&rss_cfg->rss_hash_key[hash_key_index], 731a638b1d8SJian Shen &resp_msg[0], 732a638b1d8SJian Shen HCLGEVF_RSS_KEY_SIZE - hash_key_index); 733a638b1d8SJian Shen else 734a638b1d8SJian Shen memcpy(&rss_cfg->rss_hash_key[hash_key_index], 735a638b1d8SJian Shen &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 736a638b1d8SJian Shen } 737a638b1d8SJian Shen 738a638b1d8SJian Shen return 0; 739a638b1d8SJian Shen } 740a638b1d8SJian Shen 741e2cb1decSSalil Mehta static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 742e2cb1decSSalil Mehta u8 *hfunc) 743e2cb1decSSalil Mehta { 744e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 745e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 746a638b1d8SJian Shen int i, ret; 747e2cb1decSSalil Mehta 748374ad291SJian Shen if (handle->pdev->revision >= 0x21) { 749374ad291SJian Shen /* Get hash algorithm */ 750374ad291SJian Shen if (hfunc) { 751374ad291SJian Shen switch (rss_cfg->hash_algo) { 752374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 753374ad291SJian Shen *hfunc = ETH_RSS_HASH_TOP; 754374ad291SJian Shen break; 755374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 756374ad291SJian Shen *hfunc = ETH_RSS_HASH_XOR; 757374ad291SJian Shen break; 758374ad291SJian Shen default: 759374ad291SJian Shen *hfunc = ETH_RSS_HASH_UNKNOWN; 760374ad291SJian Shen break; 761374ad291SJian Shen } 762374ad291SJian Shen } 763374ad291SJian Shen 764374ad291SJian Shen /* Get the RSS Key required by the user */ 765374ad291SJian Shen if (key) 766374ad291SJian Shen memcpy(key, rss_cfg->rss_hash_key, 767374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 768a638b1d8SJian Shen } else { 769a638b1d8SJian Shen if (hfunc) 770a638b1d8SJian Shen *hfunc = ETH_RSS_HASH_TOP; 771a638b1d8SJian Shen if (key) { 772a638b1d8SJian Shen ret = hclgevf_get_rss_hash_key(hdev); 773a638b1d8SJian Shen if (ret) 774a638b1d8SJian Shen return ret; 775a638b1d8SJian Shen memcpy(key, rss_cfg->rss_hash_key, 776a638b1d8SJian Shen 
HCLGEVF_RSS_KEY_SIZE); 777a638b1d8SJian Shen } 778374ad291SJian Shen } 779374ad291SJian Shen 780e2cb1decSSalil Mehta if (indir) 781e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 782e2cb1decSSalil Mehta indir[i] = rss_cfg->rss_indirection_tbl[i]; 783e2cb1decSSalil Mehta 784374ad291SJian Shen return 0; 785e2cb1decSSalil Mehta } 786e2cb1decSSalil Mehta 787e2cb1decSSalil Mehta static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 788e2cb1decSSalil Mehta const u8 *key, const u8 hfunc) 789e2cb1decSSalil Mehta { 790e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 791e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 792374ad291SJian Shen int ret, i; 793374ad291SJian Shen 794374ad291SJian Shen if (handle->pdev->revision >= 0x21) { 795374ad291SJian Shen /* Set the RSS Hash Key if specififed by the user */ 796374ad291SJian Shen if (key) { 797374ad291SJian Shen switch (hfunc) { 798374ad291SJian Shen case ETH_RSS_HASH_TOP: 799374ad291SJian Shen rss_cfg->hash_algo = 800374ad291SJian Shen HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 801374ad291SJian Shen break; 802374ad291SJian Shen case ETH_RSS_HASH_XOR: 803374ad291SJian Shen rss_cfg->hash_algo = 804374ad291SJian Shen HCLGEVF_RSS_HASH_ALGO_SIMPLE; 805374ad291SJian Shen break; 806374ad291SJian Shen case ETH_RSS_HASH_NO_CHANGE: 807374ad291SJian Shen break; 808374ad291SJian Shen default: 809374ad291SJian Shen return -EINVAL; 810374ad291SJian Shen } 811374ad291SJian Shen 812374ad291SJian Shen ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 813374ad291SJian Shen key); 814374ad291SJian Shen if (ret) 815374ad291SJian Shen return ret; 816374ad291SJian Shen 817374ad291SJian Shen /* Update the shadow RSS key with user specified qids */ 818374ad291SJian Shen memcpy(rss_cfg->rss_hash_key, key, 819374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 820374ad291SJian Shen } 821374ad291SJian Shen } 822e2cb1decSSalil Mehta 823e2cb1decSSalil Mehta /* update the shadow RSS 
table with user specified qids */ 824e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 825e2cb1decSSalil Mehta rss_cfg->rss_indirection_tbl[i] = indir[i]; 826e2cb1decSSalil Mehta 827e2cb1decSSalil Mehta /* update the hardware */ 828e2cb1decSSalil Mehta return hclgevf_set_rss_indir_table(hdev); 829e2cb1decSSalil Mehta } 830e2cb1decSSalil Mehta 831d97b3072SJian Shen static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 832d97b3072SJian Shen { 833d97b3072SJian Shen u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 834d97b3072SJian Shen 835d97b3072SJian Shen if (nfc->data & RXH_L4_B_2_3) 836d97b3072SJian Shen hash_sets |= HCLGEVF_D_PORT_BIT; 837d97b3072SJian Shen else 838d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_PORT_BIT; 839d97b3072SJian Shen 840d97b3072SJian Shen if (nfc->data & RXH_IP_SRC) 841d97b3072SJian Shen hash_sets |= HCLGEVF_S_IP_BIT; 842d97b3072SJian Shen else 843d97b3072SJian Shen hash_sets &= ~HCLGEVF_S_IP_BIT; 844d97b3072SJian Shen 845d97b3072SJian Shen if (nfc->data & RXH_IP_DST) 846d97b3072SJian Shen hash_sets |= HCLGEVF_D_IP_BIT; 847d97b3072SJian Shen else 848d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_IP_BIT; 849d97b3072SJian Shen 850d97b3072SJian Shen if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 851d97b3072SJian Shen hash_sets |= HCLGEVF_V_TAG_BIT; 852d97b3072SJian Shen 853d97b3072SJian Shen return hash_sets; 854d97b3072SJian Shen } 855d97b3072SJian Shen 856d97b3072SJian Shen static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 857d97b3072SJian Shen struct ethtool_rxnfc *nfc) 858d97b3072SJian Shen { 859d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 860d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 861d97b3072SJian Shen struct hclgevf_rss_input_tuple_cmd *req; 862d97b3072SJian Shen struct hclgevf_desc desc; 863d97b3072SJian Shen u8 tuple_sets; 864d97b3072SJian Shen int ret; 865d97b3072SJian Shen 866d97b3072SJian Shen if 
(handle->pdev->revision == 0x20) 867d97b3072SJian Shen return -EOPNOTSUPP; 868d97b3072SJian Shen 869d97b3072SJian Shen if (nfc->data & 870d97b3072SJian Shen ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 871d97b3072SJian Shen return -EINVAL; 872d97b3072SJian Shen 873d97b3072SJian Shen req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 874d97b3072SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 875d97b3072SJian Shen 876d97b3072SJian Shen req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 877d97b3072SJian Shen req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 878d97b3072SJian Shen req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 879d97b3072SJian Shen req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 880d97b3072SJian Shen req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 881d97b3072SJian Shen req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 882d97b3072SJian Shen req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 883d97b3072SJian Shen req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 884d97b3072SJian Shen 885d97b3072SJian Shen tuple_sets = hclgevf_get_rss_hash_bits(nfc); 886d97b3072SJian Shen switch (nfc->flow_type) { 887d97b3072SJian Shen case TCP_V4_FLOW: 888d97b3072SJian Shen req->ipv4_tcp_en = tuple_sets; 889d97b3072SJian Shen break; 890d97b3072SJian Shen case TCP_V6_FLOW: 891d97b3072SJian Shen req->ipv6_tcp_en = tuple_sets; 892d97b3072SJian Shen break; 893d97b3072SJian Shen case UDP_V4_FLOW: 894d97b3072SJian Shen req->ipv4_udp_en = tuple_sets; 895d97b3072SJian Shen break; 896d97b3072SJian Shen case UDP_V6_FLOW: 897d97b3072SJian Shen req->ipv6_udp_en = tuple_sets; 898d97b3072SJian Shen break; 899d97b3072SJian Shen case SCTP_V4_FLOW: 900d97b3072SJian Shen req->ipv4_sctp_en = tuple_sets; 901d97b3072SJian Shen break; 902d97b3072SJian Shen case SCTP_V6_FLOW: 903d97b3072SJian Shen if ((nfc->data & RXH_L4_B_0_1) || 904d97b3072SJian Shen (nfc->data & 
RXH_L4_B_2_3)) 905d97b3072SJian Shen return -EINVAL; 906d97b3072SJian Shen 907d97b3072SJian Shen req->ipv6_sctp_en = tuple_sets; 908d97b3072SJian Shen break; 909d97b3072SJian Shen case IPV4_FLOW: 910d97b3072SJian Shen req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 911d97b3072SJian Shen break; 912d97b3072SJian Shen case IPV6_FLOW: 913d97b3072SJian Shen req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 914d97b3072SJian Shen break; 915d97b3072SJian Shen default: 916d97b3072SJian Shen return -EINVAL; 917d97b3072SJian Shen } 918d97b3072SJian Shen 919d97b3072SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 920d97b3072SJian Shen if (ret) { 921d97b3072SJian Shen dev_err(&hdev->pdev->dev, 922d97b3072SJian Shen "Set rss tuple fail, status = %d\n", ret); 923d97b3072SJian Shen return ret; 924d97b3072SJian Shen } 925d97b3072SJian Shen 926d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 927d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 928d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 929d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 930d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 931d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 932d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 933d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 934d97b3072SJian Shen return 0; 935d97b3072SJian Shen } 936d97b3072SJian Shen 937d97b3072SJian Shen static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 938d97b3072SJian Shen struct ethtool_rxnfc *nfc) 939d97b3072SJian Shen { 940d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 941d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 942d97b3072SJian Shen u8 tuple_sets; 943d97b3072SJian Shen 944d97b3072SJian Shen if (handle->pdev->revision == 
0x20) 945d97b3072SJian Shen return -EOPNOTSUPP; 946d97b3072SJian Shen 947d97b3072SJian Shen nfc->data = 0; 948d97b3072SJian Shen 949d97b3072SJian Shen switch (nfc->flow_type) { 950d97b3072SJian Shen case TCP_V4_FLOW: 951d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 952d97b3072SJian Shen break; 953d97b3072SJian Shen case UDP_V4_FLOW: 954d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 955d97b3072SJian Shen break; 956d97b3072SJian Shen case TCP_V6_FLOW: 957d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 958d97b3072SJian Shen break; 959d97b3072SJian Shen case UDP_V6_FLOW: 960d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 961d97b3072SJian Shen break; 962d97b3072SJian Shen case SCTP_V4_FLOW: 963d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 964d97b3072SJian Shen break; 965d97b3072SJian Shen case SCTP_V6_FLOW: 966d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 967d97b3072SJian Shen break; 968d97b3072SJian Shen case IPV4_FLOW: 969d97b3072SJian Shen case IPV6_FLOW: 970d97b3072SJian Shen tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 971d97b3072SJian Shen break; 972d97b3072SJian Shen default: 973d97b3072SJian Shen return -EINVAL; 974d97b3072SJian Shen } 975d97b3072SJian Shen 976d97b3072SJian Shen if (!tuple_sets) 977d97b3072SJian Shen return 0; 978d97b3072SJian Shen 979d97b3072SJian Shen if (tuple_sets & HCLGEVF_D_PORT_BIT) 980d97b3072SJian Shen nfc->data |= RXH_L4_B_2_3; 981d97b3072SJian Shen if (tuple_sets & HCLGEVF_S_PORT_BIT) 982d97b3072SJian Shen nfc->data |= RXH_L4_B_0_1; 983d97b3072SJian Shen if (tuple_sets & HCLGEVF_D_IP_BIT) 984d97b3072SJian Shen nfc->data |= RXH_IP_DST; 985d97b3072SJian Shen if (tuple_sets & HCLGEVF_S_IP_BIT) 986d97b3072SJian Shen nfc->data |= RXH_IP_SRC; 987d97b3072SJian Shen 988d97b3072SJian Shen return 0; 989d97b3072SJian Shen } 990d97b3072SJian Shen 991d97b3072SJian Shen static int 
hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 992d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg) 993d97b3072SJian Shen { 994d97b3072SJian Shen struct hclgevf_rss_input_tuple_cmd *req; 995d97b3072SJian Shen struct hclgevf_desc desc; 996d97b3072SJian Shen int ret; 997d97b3072SJian Shen 998d97b3072SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 999d97b3072SJian Shen 1000d97b3072SJian Shen req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 1001d97b3072SJian Shen 1002d97b3072SJian Shen req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 1003d97b3072SJian Shen req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 1004d97b3072SJian Shen req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 1005d97b3072SJian Shen req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 1006d97b3072SJian Shen req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 1007d97b3072SJian Shen req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 1008d97b3072SJian Shen req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 1009d97b3072SJian Shen req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 1010d97b3072SJian Shen 1011d97b3072SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1012d97b3072SJian Shen if (ret) 1013d97b3072SJian Shen dev_err(&hdev->pdev->dev, 1014d97b3072SJian Shen "Configure rss input fail, status = %d\n", ret); 1015d97b3072SJian Shen return ret; 1016d97b3072SJian Shen } 1017d97b3072SJian Shen 1018e2cb1decSSalil Mehta static int hclgevf_get_tc_size(struct hnae3_handle *handle) 1019e2cb1decSSalil Mehta { 1020e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1021e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1022e2cb1decSSalil Mehta 1023e2cb1decSSalil Mehta return rss_cfg->rss_size; 1024e2cb1decSSalil Mehta } 1025e2cb1decSSalil Mehta 1026e2cb1decSSalil Mehta static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 
1027b204bc74SPeng Li int vector_id, 1028e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1029e2cb1decSSalil Mehta { 1030e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1031d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1032e2cb1decSSalil Mehta struct hnae3_ring_chain_node *node; 1033e2cb1decSSalil Mehta int status; 1034d3410018SYufeng Mo int i = 0; 1035e2cb1decSSalil Mehta 1036d3410018SYufeng Mo memset(&send_msg, 0, sizeof(send_msg)); 1037d3410018SYufeng Mo send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 1038c09ba484SPeng Li HCLGE_MBX_UNMAP_RING_TO_VECTOR; 1039d3410018SYufeng Mo send_msg.vector_id = vector_id; 1040e2cb1decSSalil Mehta 1041e2cb1decSSalil Mehta for (node = ring_chain; node; node = node->next) { 1042d3410018SYufeng Mo send_msg.param[i].ring_type = 1043e4e87715SPeng Li hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 1044d3410018SYufeng Mo 1045d3410018SYufeng Mo send_msg.param[i].tqp_index = node->tqp_index; 1046d3410018SYufeng Mo send_msg.param[i].int_gl_index = 1047d3410018SYufeng Mo hnae3_get_field(node->int_gl_idx, 104879eee410SFuyun Liang HNAE3_RING_GL_IDX_M, 104979eee410SFuyun Liang HNAE3_RING_GL_IDX_S); 105079eee410SFuyun Liang 10515d02a58dSYunsheng Lin i++; 1052d3410018SYufeng Mo if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 1053d3410018SYufeng Mo send_msg.ring_num = i; 1054e2cb1decSSalil Mehta 1055d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 1056d3410018SYufeng Mo NULL, 0); 1057e2cb1decSSalil Mehta if (status) { 1058e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1059e2cb1decSSalil Mehta "Map TQP fail, status is %d.\n", 1060e2cb1decSSalil Mehta status); 1061e2cb1decSSalil Mehta return status; 1062e2cb1decSSalil Mehta } 1063e2cb1decSSalil Mehta i = 0; 1064e2cb1decSSalil Mehta } 1065e2cb1decSSalil Mehta } 1066e2cb1decSSalil Mehta 1067e2cb1decSSalil Mehta return 0; 1068e2cb1decSSalil Mehta } 1069e2cb1decSSalil Mehta 1070e2cb1decSSalil Mehta static int 
hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1071e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1072e2cb1decSSalil Mehta { 1073b204bc74SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1074b204bc74SPeng Li int vector_id; 1075b204bc74SPeng Li 1076b204bc74SPeng Li vector_id = hclgevf_get_vector_index(hdev, vector); 1077b204bc74SPeng Li if (vector_id < 0) { 1078b204bc74SPeng Li dev_err(&handle->pdev->dev, 1079b204bc74SPeng Li "Get vector index fail. ret =%d\n", vector_id); 1080b204bc74SPeng Li return vector_id; 1081b204bc74SPeng Li } 1082b204bc74SPeng Li 1083b204bc74SPeng Li return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1084e2cb1decSSalil Mehta } 1085e2cb1decSSalil Mehta 1086e2cb1decSSalil Mehta static int hclgevf_unmap_ring_from_vector( 1087e2cb1decSSalil Mehta struct hnae3_handle *handle, 1088e2cb1decSSalil Mehta int vector, 1089e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1090e2cb1decSSalil Mehta { 1091e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1092e2cb1decSSalil Mehta int ret, vector_id; 1093e2cb1decSSalil Mehta 1094dea846e8SHuazhong Tan if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1095dea846e8SHuazhong Tan return 0; 1096dea846e8SHuazhong Tan 1097e2cb1decSSalil Mehta vector_id = hclgevf_get_vector_index(hdev, vector); 1098e2cb1decSSalil Mehta if (vector_id < 0) { 1099e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 1100e2cb1decSSalil Mehta "Get vector index fail. ret =%d\n", vector_id); 1101e2cb1decSSalil Mehta return vector_id; 1102e2cb1decSSalil Mehta } 1103e2cb1decSSalil Mehta 1104b204bc74SPeng Li ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 11050d3e6631SYunsheng Lin if (ret) 1106e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 1107e2cb1decSSalil Mehta "Unmap ring from vector fail. 
vector=%d, ret =%d\n", 1108e2cb1decSSalil Mehta vector_id, 1109e2cb1decSSalil Mehta ret); 11100d3e6631SYunsheng Lin 1111e2cb1decSSalil Mehta return ret; 1112e2cb1decSSalil Mehta } 1113e2cb1decSSalil Mehta 11140d3e6631SYunsheng Lin static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 11150d3e6631SYunsheng Lin { 11160d3e6631SYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 111703718db9SYunsheng Lin int vector_id; 11180d3e6631SYunsheng Lin 111903718db9SYunsheng Lin vector_id = hclgevf_get_vector_index(hdev, vector); 112003718db9SYunsheng Lin if (vector_id < 0) { 112103718db9SYunsheng Lin dev_err(&handle->pdev->dev, 112203718db9SYunsheng Lin "hclgevf_put_vector get vector index fail. ret =%d\n", 112303718db9SYunsheng Lin vector_id); 112403718db9SYunsheng Lin return vector_id; 112503718db9SYunsheng Lin } 112603718db9SYunsheng Lin 112703718db9SYunsheng Lin hclgevf_free_vector(hdev, vector_id); 1128e2cb1decSSalil Mehta 1129e2cb1decSSalil Mehta return 0; 1130e2cb1decSSalil Mehta } 1131e2cb1decSSalil Mehta 11323b75c3dfSPeng Li static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1133e196ec75SJian Shen bool en_uc_pmc, bool en_mc_pmc, 1134f01f5559SJian Shen bool en_bc_pmc) 1135e2cb1decSSalil Mehta { 1136d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1137f01f5559SJian Shen int ret; 1138e2cb1decSSalil Mehta 1139d3410018SYufeng Mo memset(&send_msg, 0, sizeof(send_msg)); 1140d3410018SYufeng Mo send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 1141d3410018SYufeng Mo send_msg.en_bc = en_bc_pmc ? 1 : 0; 1142d3410018SYufeng Mo send_msg.en_uc = en_uc_pmc ? 1 : 0; 1143d3410018SYufeng Mo send_msg.en_mc = en_mc_pmc ? 
1 : 0; 1144e2cb1decSSalil Mehta 1145d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1146f01f5559SJian Shen if (ret) 1147e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1148f01f5559SJian Shen "Set promisc mode fail, status is %d.\n", ret); 1149e2cb1decSSalil Mehta 1150f01f5559SJian Shen return ret; 1151e2cb1decSSalil Mehta } 1152e2cb1decSSalil Mehta 1153e196ec75SJian Shen static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 1154e196ec75SJian Shen bool en_mc_pmc) 1155e2cb1decSSalil Mehta { 1156e196ec75SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1157e196ec75SJian Shen struct pci_dev *pdev = hdev->pdev; 1158e196ec75SJian Shen bool en_bc_pmc; 1159e196ec75SJian Shen 1160e196ec75SJian Shen en_bc_pmc = pdev->revision != 0x20; 1161e196ec75SJian Shen 1162e196ec75SJian Shen return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 1163e196ec75SJian Shen en_bc_pmc); 1164e2cb1decSSalil Mehta } 1165e2cb1decSSalil Mehta 1166c631c696SJian Shen static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 1167c631c696SJian Shen { 1168c631c696SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1169c631c696SJian Shen 1170c631c696SJian Shen set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1171c631c696SJian Shen } 1172c631c696SJian Shen 1173c631c696SJian Shen static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 1174c631c696SJian Shen { 1175c631c696SJian Shen struct hnae3_handle *handle = &hdev->nic; 1176c631c696SJian Shen bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 1177c631c696SJian Shen bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 1178c631c696SJian Shen int ret; 1179c631c696SJian Shen 1180c631c696SJian Shen if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 1181c631c696SJian Shen ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 1182c631c696SJian Shen if (!ret) 1183c631c696SJian Shen 
clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1184c631c696SJian Shen } 1185c631c696SJian Shen } 1186c631c696SJian Shen 1187ebaf1908SWeihang Li static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, 1188e2cb1decSSalil Mehta int stream_id, bool enable) 1189e2cb1decSSalil Mehta { 1190e2cb1decSSalil Mehta struct hclgevf_cfg_com_tqp_queue_cmd *req; 1191e2cb1decSSalil Mehta struct hclgevf_desc desc; 1192e2cb1decSSalil Mehta int status; 1193e2cb1decSSalil Mehta 1194e2cb1decSSalil Mehta req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1195e2cb1decSSalil Mehta 1196e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1197e2cb1decSSalil Mehta false); 1198e2cb1decSSalil Mehta req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1199e2cb1decSSalil Mehta req->stream_id = cpu_to_le16(stream_id); 1200ebaf1908SWeihang Li if (enable) 1201ebaf1908SWeihang Li req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 1202e2cb1decSSalil Mehta 1203e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1204e2cb1decSSalil Mehta if (status) 1205e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1206e2cb1decSSalil Mehta "TQP enable fail, status =%d.\n", status); 1207e2cb1decSSalil Mehta 1208e2cb1decSSalil Mehta return status; 1209e2cb1decSSalil Mehta } 1210e2cb1decSSalil Mehta 1211e2cb1decSSalil Mehta static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1212e2cb1decSSalil Mehta { 1213b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1214e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 1215e2cb1decSSalil Mehta int i; 1216e2cb1decSSalil Mehta 1217b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 1218b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1219e2cb1decSSalil Mehta memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1220e2cb1decSSalil Mehta } 1221e2cb1decSSalil Mehta } 1222e2cb1decSSalil Mehta 12238e6de441SHuazhong Tan static int 
hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 12248e6de441SHuazhong Tan { 1225d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 12268e6de441SHuazhong Tan u8 host_mac[ETH_ALEN]; 12278e6de441SHuazhong Tan int status; 12288e6de441SHuazhong Tan 1229d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 1230d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 1231d3410018SYufeng Mo ETH_ALEN); 12328e6de441SHuazhong Tan if (status) { 12338e6de441SHuazhong Tan dev_err(&hdev->pdev->dev, 12348e6de441SHuazhong Tan "fail to get VF MAC from host %d", status); 12358e6de441SHuazhong Tan return status; 12368e6de441SHuazhong Tan } 12378e6de441SHuazhong Tan 12388e6de441SHuazhong Tan ether_addr_copy(p, host_mac); 12398e6de441SHuazhong Tan 12408e6de441SHuazhong Tan return 0; 12418e6de441SHuazhong Tan } 12428e6de441SHuazhong Tan 1243e2cb1decSSalil Mehta static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1244e2cb1decSSalil Mehta { 1245e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 12468e6de441SHuazhong Tan u8 host_mac_addr[ETH_ALEN]; 1247e2cb1decSSalil Mehta 12488e6de441SHuazhong Tan if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 12498e6de441SHuazhong Tan return; 12508e6de441SHuazhong Tan 12518e6de441SHuazhong Tan hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 12528e6de441SHuazhong Tan if (hdev->has_pf_mac) 12538e6de441SHuazhong Tan ether_addr_copy(p, host_mac_addr); 12548e6de441SHuazhong Tan else 1255e2cb1decSSalil Mehta ether_addr_copy(p, hdev->hw.mac.mac_addr); 1256e2cb1decSSalil Mehta } 1257e2cb1decSSalil Mehta 125859098055SFuyun Liang static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 125959098055SFuyun Liang bool is_first) 1260e2cb1decSSalil Mehta { 1261e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1262e2cb1decSSalil Mehta u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1263d3410018SYufeng Mo struct 
hclge_vf_to_pf_msg send_msg; 1264e2cb1decSSalil Mehta u8 *new_mac_addr = (u8 *)p; 1265e2cb1decSSalil Mehta int status; 1266e2cb1decSSalil Mehta 1267d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 1268ee4bcd3bSJian Shen send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1269d3410018SYufeng Mo ether_addr_copy(send_msg.data, new_mac_addr); 1270ee4bcd3bSJian Shen if (is_first && !hdev->has_pf_mac) 1271ee4bcd3bSJian Shen eth_zero_addr(&send_msg.data[ETH_ALEN]); 1272ee4bcd3bSJian Shen else 1273d3410018SYufeng Mo ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 1274d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1275e2cb1decSSalil Mehta if (!status) 1276e2cb1decSSalil Mehta ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1277e2cb1decSSalil Mehta 1278e2cb1decSSalil Mehta return status; 1279e2cb1decSSalil Mehta } 1280e2cb1decSSalil Mehta 1281ee4bcd3bSJian Shen static struct hclgevf_mac_addr_node * 1282ee4bcd3bSJian Shen hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 1283ee4bcd3bSJian Shen { 1284ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp; 1285ee4bcd3bSJian Shen 1286ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) 1287ee4bcd3bSJian Shen if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 1288ee4bcd3bSJian Shen return mac_node; 1289ee4bcd3bSJian Shen 1290ee4bcd3bSJian Shen return NULL; 1291ee4bcd3bSJian Shen } 1292ee4bcd3bSJian Shen 1293ee4bcd3bSJian Shen static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 1294ee4bcd3bSJian Shen enum HCLGEVF_MAC_NODE_STATE state) 1295ee4bcd3bSJian Shen { 1296ee4bcd3bSJian Shen switch (state) { 1297ee4bcd3bSJian Shen /* from set_rx_mode or tmp_add_list */ 1298ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_ADD: 1299ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_DEL) 1300ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_ACTIVE; 1301ee4bcd3bSJian Shen break; 1302ee4bcd3bSJian Shen /* 
	 only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		/* a pending TO_ADD entry that gets deleted before it was ever
		 * sent to the PF can simply be dropped; anything else is
		 * marked for deletion at the next sync.
		 */
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}

/* hclgevf_update_mac_list - record a MAC address add/delete request in the
 * VF's software MAC table (uc_mac_list or mc_mac_list, per @mac_type).
 * The hardware is not touched here; the list is flushed to the PF later by
 * the periodic sync (see hclgevf_sync_mac_table()).
 * Returns 0 on success, -ENOENT when asked to delete an address that was
 * never added, or -ENOMEM on allocation failure.
 */
static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a
	 * new state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address is never added, unnecessary to delete */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	/* GFP_ATOMIC: allocated while holding a BH spinlock, must not sleep */
	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

/* queue the addition of a unicast MAC address */
static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

/* queue the removal of a unicast MAC address */
static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

/* queue the addition of a multicast MAC address */
static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

/* queue the removal of a multicast MAC address */
static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

/* hclgevf_add_del_mac_addr - translate one MAC table node into the matching
 * mailbox request (uc/mc x add/remove) and send it to the PF.
 * The send is non-blocking (need_resp == false); the return value only
 * reflects whether the mailbox message could be posted.
 */
static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
/* hclgevf_config_mac_list - push every node on @list to the PF via mailbox.
 * On success a TO_ADD node becomes ACTIVE; a TO_DEL node is freed. On the
 * first mailbox failure the walk stops, leaving the remaining nodes on
 * @list so the caller can merge them back and retry at the next sync.
 */
static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %pM, state = %d, ret = %d\n",
				mac_node->mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

/* hclgevf_sync_from_add_list - merge the (possibly partially processed)
 * tmp add list back into the main uc/mc list after the mailbox phase.
 */
static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means we have received a TO_DEL request
		 * during the time window of sending mac config request to PF.
		 * If mac_node state is ACTIVE, then change its state to TO_DEL,
		 * then it will be removed at next time. If it is TO_ADD, it
		 * means the TO_ADD request failed, so just remove the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

/* hclgevf_sync_from_del_list - merge the leftover tmp delete list back into
 * the main uc/mc list; leftovers are deletes that failed and must be retried.
 */
static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * a new TO_ADD request was received during the time
			 * window of sending the mac addr config request to
			 * the PF, so just change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}

/* free every node on @list; caller must hold mac_list_lock */
static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

/* hclgevf_sync_mac_list - flush the pending uc or mc MAC changes to the PF.
 * Snapshot the pending work under the lock into tmp_add_list/tmp_del_list,
 * drop the lock for the (sleeping) mailbox traffic, then re-merge anything
 * that failed so it is retried on the next sync pass.
 */
static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addr to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addr outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			/* TO_DEL nodes are moved wholesale; TO_ADD nodes are
			 * copied so the original stays visible in the table
			 */
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if some mac addresses failed to be added/deleted, move them back
	 * to the mac_list, and retry at next time.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

/* flush both unicast and multicast pending MAC changes to the PF */
static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

/* drop every entry of the software MAC table (uninit path) */
static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

/* hclgevf_set_vlan_filter - ask the PF to add (is_kill == false) or remove
 * (is_kill == true) a hardware VLAN filter for this VF.
 * Returns -EINVAL for an out-of-range id, -EPROTONOSUPPORT for anything but
 * 802.1Q, -EBUSY while a reset is in flight, else the mailbox result.
 */
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
/* byte offsets of the fields inside send_msg.data */
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When device is resetting, firmware is unable to handle
	 * mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing the hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to be consistent
	 * with the stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

/* hclgevf_sync_vlan_filter - retry VLAN deletions that previously failed
 * (recorded in vlan_del_fail_bmap), at most HCLGEVF_MAX_SYNC_COUNT per call;
 * stops early on the first mailbox error so the bit stays set for later.
 */
static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

/* enable/disable hardware VLAN tag stripping on receive, via the PF */
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

/* hclgevf_reset_tqp - reset one task queue pair: first disable the queue
 * locally, then ask the PF (synchronously) to perform the queue reset.
 */
static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

/* request the PF to set this VF's MTU; waits for the PF's response */
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

/* hclgevf_notify_client - forward a reset notification to the nic client.
 * Returns 0 when no registered client exists, -EOPNOTSUPP when the client
 * has no reset_notify op, otherwise the client's own return value.
 */
static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if
 (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

/* hclgevf_reset_wait - poll the hardware reset-in-progress register until
 * the reset bit(s) clear (HCLGEVF_VF_RST_ING for a VF reset, HCLGEVF_RST_ING
 * otherwise) or the timeout expires. Returns 0 when the reset completed.
 */
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could'nt get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let reset of the stack to complete. This
	 * might happen in case reset assertion was made by PF. Yes, this also
	 * means we might end up waiting bit more even for VF reset.
	 */
	msleep(5000);

	return 0;
}

/* hclgevf_reset_handshake - set/clear the software-reset-ready handshake bit.
 * NOTE(review): the flag lives in HCLGEVF_NIC_CSQ_DEPTH_REG, apparently in a
 * spare bit of the CSQ depth register used to handshake with IMP firmware —
 * confirm against the register manual.
 */
static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

/* hclgevf_reset_stack - rebuild the software stack after a hardware reset:
 * uninit the nic client, re-init the hclgevf device, re-init the client,
 * clear the IMP handshake flag and finally bring the nic back up.
 */
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

/* hclgevf_reset_prepare_wait - pre-reset work: for a VF function reset, ask
 * the PF to assert it via mailbox; then disable the command queue, give the
 * hardware a grace period and raise the reset-ready handshake flag.
 */
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME 100

	struct hclge_vf_to_pf_msg send_msg;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

/* dump reset statistics and related hardware state for debugging */
static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

/* hclgevf_reset_err_handle - handle a failed reset: restore the handshake
 * flag, bump the failure counter, and re-queue the reset while fewer than
 * HCLGEVF_RESET_MAX_FAIL_CNT attempts have failed; otherwise mark the
 * device RST_FAIL and dump diagnostics.
 */
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fails */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

/* hclgevf_reset_prepare - first phase of a reset: bring the nic down
 * (under rtnl_lock, as the client notification touches netdev state) and
 * run the pre-reset handshake with PF/firmware.
 */
static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.rst_cnt++;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

/* hclgevf_reset_rebuild - final phase of a reset: rebuild the stack (under
 * rtnl_lock) and, on success, clear the failure bookkeeping.
 */
static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

/* hclgevf_reset - run the full reset sequence: prepare, wait for the
 * hardware to finish, rebuild; any failure funnels into the common error
 * handler which decides whether to retry.
 */
static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}

/* hclgevf_get_reset_level - pick the highest-priority reset level whose bit
 * is set in @addr, clearing that bit and the bits of any lower-priority
 * resets it supersedes. Returns HNAE3_NONE_RESET when nothing is pending.
 */
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

/* hclgevf_reset_event - entry point for a reset request coming from the
 * enet layer: choose the reset level (default request if any, else a VF
 * function reset) and schedule the reset task.
 */
static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

/* record a default reset level to be used by the next reset event */
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ?
1 : 0, vector->addr);
}

/* Quiesce the VF before an FLR: take the reset semaphore, run the reset
 * prepare step (retried up to HCLGEVF_FLR_RETRY_CNT times while a reset is
 * still pending), then mask the misc vector so no interrupt fires while
 * the FLR is in progress. The semaphore is released in hclgevf_flr_done().
 */
static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_RETRY_WAIT_MS	500
#define HCLGEVF_FLR_RETRY_CNT		5

	struct hclgevf_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

retry:
	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	hdev->reset_type = HNAE3_FLR_RESET;
	ret = hclgevf_reset_prepare(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
			ret);
		if (hdev->reset_pending ||
		    retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
			dev_err(&hdev->pdev->dev,
				"reset_pending:0x%lx, retry_cnt:%d\n",
				hdev->reset_pending, retry_cnt);
			/* drop the semaphore and retry after a delay */
			clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
			up(&hdev->reset_sem);
			msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
			goto retry;
		}
	}

	/* disable misc vector before FLR done */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	hdev->rst_stats.flr_rst_cnt++;
}

/* Counterpart of hclgevf_flr_prepare(): re-enable the misc vector,
 * rebuild the stack, and release the reset semaphore taken during the
 * prepare step.
 */
static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	hclgevf_enable_vector(&hdev->misc_vector, true);

	ret = hclgevf_reset_rebuild(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
			 ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

/* Return the cached firmware version for this VF. */
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

/* Reserve vector 0 as the misc (mailbox/reset) interrupt vector and
 * account for it in the MSI bookkeeping.
 */
static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

/* Queue the reset service task unless the device is being removed; the
 * RST_SERVICE_SCHED bit prevents double-scheduling.
 */
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) 
&&
	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

/* Queue the mailbox service task unless the device is being removed; the
 * MBX_SERVICE_SCHED bit prevents double-scheduling.
 */
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

/* (Re)arm the service task after @delay jiffies, unless the device is
 * being removed or a previous reset has failed.
 */
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}

/* Reset half of the service task: handle resets the PF has signalled as
 * pending, and resets requested from this VF side, serialized against FLR
 * handling by hdev->reset_sem.
 */
static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		/* drain every pending reset level, highest priority first */
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect above cases than to
		 * react to the response of PF for this reset request. PF will
		 * ack the 1b and 2. cases but we will not get any intimation
		 * about 1a from PF as cmdq would be in unreliable state i.e.
		 * mailbox communication between PF and VF would be broken.
		 *
		 * if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

/* Mailbox half of the service task: drain asynchronous mailbox messages
 * from the PF. The MBX_HANDLING bit guards against concurrent handling.
 */
static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		return;

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

/* Send a keep-alive mailbox message to the PF (no response expected);
 * skipped while the command queue is disabled.
 */
static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
		return;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

/* Periodic half of the service task, rate-limited to roughly once per HZ:
 * sends keep-alives, refreshes TQP stats, link, VLAN, MAC and promisc
 * state, then re-schedules itself with the computed delay.
 */
static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);
	struct hnae3_handle *handle = &hdev->nic;

	/* ran less than HZ ago: defer the remaining time instead of working */
	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
		hclgevf_keep_alive(hdev);

	/* when DOWN, only the keep-alive above is performed */
	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
		hclgevf_tqps_update_stats(handle);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hclgevf_sync_mac_table(hdev);

	hclgevf_sync_promisc_mode(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclgevf_task_schedule(hdev, delta);
}

/* Single delayed-work entry point that multiplexes the reset, mailbox and
 * periodic sub-tasks.
 */
static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
						service_task.work);

	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
	hclgevf_periodic_service_task(hdev);

	/* Handle reset and mbx again in case periodical task delays the
	 * handling by calling hclgevf_task_schedule() in
	 * hclgevf_periodic_service_task()
	 */
	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
}

/* Acknowledge vector 0 event sources by writing @regclr to the CMDQ
 * source register.
 */
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

/* Decode the vector 0 interrupt cause (hardware reset vs. mailbox) from
 * the CMDQ state register, record the pending work, and compute the value
 * the caller should use to clear the event.
 */
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 val, cmdq_stat_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
					 HCLGEVF_VECTOR0_CMDQ_STATE_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
		hdev->rst_stats.vf_rst_cnt++;
		/* set up VF hardware reset status, its PF will clear
		 * this status when PF has initialized done.
		 */
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
				  val | HCLGEVF_VF_RST_ING_BIT);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		/* for revision 0x21, clearing interrupt is writing bit 0
		 * to the clear register, writing bit 1 means to keep the
		 * old value.
		 * for revision 0x20, the clear register is a read & write
		 * register, so we should just write 0 to the bit we are
		 * handling, and keep other bits as cmdq_stat_reg.
		 */
		if (hdev->pdev->revision >= 0x21)
			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		else
			*clearval = cmdq_stat_reg &
				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);

		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
		 cmdq_stat_reg);

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

/* Misc (vector 0) interrupt handler: masks the vector, dispatches on the
 * decoded cause, then clears the event and unmasks the vector for known
 * causes. Unknown causes leave the vector masked.
 */
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

/* Fetch the VF configuration from the PF: port based VLAN state, queue
 * info and depth, media type, and TC info.
 */
static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	ret = hclgevf_get_pf_media_type(hdev);
	if (ret)
		return ret;

	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

/* Allocate the hclgevf_dev private structure and link it with the ae
 * device (devm-managed, freed with the pci device).
 */
static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

/* Populate the RoCE handle's vector/netdev/io-base info from the NIC
 * handle; fails with -EINVAL when not enough MSI vectors remain.
 */
static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

/* Enable/disable hardware GRO via the generic GRO config command; a no-op
 * on hardware without GRO support.
 */
static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = en ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

/* Initialize the software RSS configuration: hash algorithm/key and tuple
 * fields (key/tuples only on revision >= 0x21), plus the indirection
 * table spread evenly over rss_size queues.
 */
static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_tuple_cfg *tuple_sets;
	u32 i;

	rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
	tuple_sets = &rss_cfg->rss_tuple_sets;
	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	}

	/* Initialize RSS indirect table */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}

/* Program the RSS configuration prepared by hclgevf_rss_init_cfg() into
 * the hardware (key and tuples only on revision >= 0x21).
 */
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;

	if (hdev->pdev->revision >= 0x21) {
		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
}

/* Initialize VLAN config by setting the filter for VLAN id 0 with
 * is_kill=false. NOTE(review): presumably this enables the default VLAN 0
 * entry — confirm against hclgevf_set_vlan_filter().
 */
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

/* Busy-wait (bounded by HCLGEVF_FLUSH_LINK_TIMEOUT iterations) until an
 * in-flight link update in the service task has been observed to finish.
 */
static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
{
#define HCLGEVF_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

/* Start or stop the periodic service task for this handle. On stop, mark
 * the device DOWN and wait out any link update already in progress.
 */
static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		hclgevf_task_schedule(hdev, 0);
	} else {
		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclgevf_flush_link_update(hdev);
	}
}

/* Bring the ae device up: reset TQP stats, refresh link info/mode and
 * clear the DOWN state.
 */
static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

/* Bring the ae device down: mark DOWN, reset each TQP (skipped during a
 * VF reset), clear TQP stats and report link down.
 */
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->reset_type != HNAE3_VF_RESET)
		for (i = 0; i < handle->kinfo.num_tqps; i++)
			if (hclgevf_reset_tqp(handle, i))
				break;

	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

/* Report this VF's alive state to the PF via a mailbox message (no
 * response expected).
 */
static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
#define HCLGEVF_STATE_ALIVE	1
#define HCLGEVF_STATE_NOT_ALIVE	0

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
				   HCLGEVF_STATE_NOT_ALIVE;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

/* Client start hook: mark the VF alive to the PF. */
static int hclgevf_client_start(struct hnae3_handle *handle)
{
	int ret;

	ret = hclgevf_set_alive(handle, true);
	if (ret)
		return ret;

	return 0;
}

/* Client stop hook: mark the VF not alive; a failure is only logged. */
static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);
}

/* Initialize driver software state: clear service/handling flags, set up
 * the delayed service work, mailbox mutex, reset semaphore and MAC table
 * lists, and start with the device marked DOWN.
 */
static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);
	sema_init(&hdev->reset_sem, 1);

	spin_lock_init(&hdev->mac_table.mac_list_lock);
	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

/* Tear down driver software state: mark DOWN and REMOVING, cancel any
 * queued service work and destroy the mailbox mutex.
 */
static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

/* Allocate interrupt vectors for the VF: MSI-X only when RoCE is
 * supported (reserving the RoCE base offset), otherwise MSI or MSI-X.
 */
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_dev_roce_supported(hdev))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
						hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2623e2cb1decSSalil Mehta hdev->num_msi, vectors); 2624e2cb1decSSalil Mehta 2625e2cb1decSSalil Mehta hdev->num_msi = vectors; 2626e2cb1decSSalil Mehta hdev->num_msi_left = vectors; 2627580a05f9SYonglong Liu 2628e2cb1decSSalil Mehta hdev->base_msi_vector = pdev->irq; 262907acf909SJian Shen hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2630e2cb1decSSalil Mehta 2631e2cb1decSSalil Mehta hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2632e2cb1decSSalil Mehta sizeof(u16), GFP_KERNEL); 2633e2cb1decSSalil Mehta if (!hdev->vector_status) { 2634e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 2635e2cb1decSSalil Mehta return -ENOMEM; 2636e2cb1decSSalil Mehta } 2637e2cb1decSSalil Mehta 2638e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++) 2639e2cb1decSSalil Mehta hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2640e2cb1decSSalil Mehta 2641e2cb1decSSalil Mehta hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2642e2cb1decSSalil Mehta sizeof(int), GFP_KERNEL); 2643e2cb1decSSalil Mehta if (!hdev->vector_irq) { 2644862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_status); 2645e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 2646e2cb1decSSalil Mehta return -ENOMEM; 2647e2cb1decSSalil Mehta } 2648e2cb1decSSalil Mehta 2649e2cb1decSSalil Mehta return 0; 2650e2cb1decSSalil Mehta } 2651e2cb1decSSalil Mehta
/* Release the vector bookkeeping arrays and free the PCI IRQ vectors. */
2652e2cb1decSSalil Mehta static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2653e2cb1decSSalil Mehta { 2654e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 2655e2cb1decSSalil Mehta 2656862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_status); 2657862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_irq); 2658e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 2659e2cb1decSSalil Mehta } 2660e2cb1decSSalil Mehta
/* Set up the misc (vector 0) interrupt: claim the vector, request the IRQ,
 * clear any stale event cause and enable the vector.
 */
2661e2cb1decSSalil Mehta static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2662e2cb1decSSalil Mehta { 2663cdd332acSGuojia Liao int ret; 2664e2cb1decSSalil Mehta 2665e2cb1decSSalil Mehta hclgevf_get_misc_vector(hdev); 2666e2cb1decSSalil Mehta 2667f97c4d82SYonglong Liu snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2668f97c4d82SYonglong Liu HCLGEVF_NAME, pci_name(hdev->pdev)); 2669e2cb1decSSalil Mehta ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2670f97c4d82SYonglong Liu 0, hdev->misc_vector.name, hdev); 2671e2cb1decSSalil Mehta if (ret) { 2672e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2673e2cb1decSSalil Mehta hdev->misc_vector.vector_irq); 2674e2cb1decSSalil Mehta return ret; 2675e2cb1decSSalil Mehta } 2676e2cb1decSSalil Mehta 26771819e409SXi Wang hclgevf_clear_event_cause(hdev, 0); 26781819e409SXi Wang 2679e2cb1decSSalil Mehta /* enable misc. vector(vector 0) */ 2680e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, true); 2681e2cb1decSSalil Mehta 2682e2cb1decSSalil Mehta return ret; 2683e2cb1decSSalil Mehta } 2684e2cb1decSSalil Mehta
/* Reverse of hclgevf_misc_irq_init: disable vector 0, wait for a running
 * handler to finish (synchronize_irq), then free the IRQ and the vector.
 */
2685e2cb1decSSalil Mehta static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2686e2cb1decSSalil Mehta { 2687e2cb1decSSalil Mehta /* disable misc vector(vector 0) */ 2688e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, false); 26891819e409SXi Wang synchronize_irq(hdev->misc_vector.vector_irq); 2690e2cb1decSSalil Mehta free_irq(hdev->misc_vector.vector_irq, hdev); 2691e2cb1decSSalil Mehta hclgevf_free_vector(hdev, 0); 2692e2cb1decSSalil Mehta } 2693e2cb1decSSalil Mehta
/* Log a summary of this VF's configuration (queue counts, TC map, media
 * type) to the kernel log; used when driver-level messages are enabled.
 */
2694bb87be87SYonglong Liu static void hclgevf_info_show(struct hclgevf_dev *hdev) 2695bb87be87SYonglong Liu { 2696bb87be87SYonglong Liu struct device *dev = &hdev->pdev->dev; 2697bb87be87SYonglong Liu 2698bb87be87SYonglong Liu dev_info(dev, "VF info begin:\n"); 2699bb87be87SYonglong Liu 2700adcf738bSGuojia Liao dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2701adcf738bSGuojia Liao dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2702adcf738bSGuojia Liao dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2703adcf738bSGuojia Liao dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2704adcf738bSGuojia Liao dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2705adcf738bSGuojia Liao dev_info(dev, "PF media type of this VF: %u\n", 2706bb87be87SYonglong Liu hdev->hw.mac.media_type); 2707bb87be87SYonglong Liu 2708bb87be87SYonglong Liu dev_info(dev, "VF info end.\n"); 2709bb87be87SYonglong Liu } 2710bb87be87SYonglong Liu
/* Initialize the NIC client instance. If a reset started (or completed) while
 * init was in progress, undo the registration and return -EBUSY so the caller
 * retries after the reset settles.
 */
27111db58f86SHuazhong Tan static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 27121db58f86SHuazhong Tan struct hnae3_client *client) 27131db58f86SHuazhong Tan { 27141db58f86SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 27154cd5beaaSGuangbin Huang int rst_cnt = hdev->rst_stats.rst_cnt; 27161db58f86SHuazhong Tan int ret; 27171db58f86SHuazhong Tan 27181db58f86SHuazhong Tan ret = client->ops->init_instance(&hdev->nic); 27191db58f86SHuazhong Tan if (ret) 27201db58f86SHuazhong Tan return ret; 27211db58f86SHuazhong Tan 27221db58f86SHuazhong Tan set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 27234cd5beaaSGuangbin Huang if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 27244cd5beaaSGuangbin Huang rst_cnt != hdev->rst_stats.rst_cnt) { 27254cd5beaaSGuangbin Huang clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 27264cd5beaaSGuangbin Huang 27274cd5beaaSGuangbin Huang client->ops->uninit_instance(&hdev->nic, 0); 27284cd5beaaSGuangbin Huang return -EBUSY; 27294cd5beaaSGuangbin Huang } 27304cd5beaaSGuangbin Huang 27311db58f86SHuazhong Tan hnae3_set_client_init_flag(client, ae_dev, 1); 27321db58f86SHuazhong Tan 27331db58f86SHuazhong Tan if (netif_msg_drv(&hdev->nic)) 27341db58f86SHuazhong Tan hclgevf_info_show(hdev); 27351db58f86SHuazhong Tan 27361db58f86SHuazhong Tan return 0; 27371db58f86SHuazhong Tan } 27381db58f86SHuazhong Tan
/* Initialize the RoCE client instance; a no-op (success) unless the device
 * supports RoCE and both RoCE and NIC clients are registered.
 */
27391db58f86SHuazhong Tan static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 27401db58f86SHuazhong Tan struct hnae3_client *client) 27411db58f86SHuazhong Tan { 27421db58f86SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 27431db58f86SHuazhong Tan int ret; 27441db58f86SHuazhong Tan 27451db58f86SHuazhong Tan if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 27461db58f86SHuazhong Tan !hdev->nic_client) 27471db58f86SHuazhong Tan return 0; 27481db58f86SHuazhong Tan 27491db58f86SHuazhong Tan ret = hclgevf_init_roce_base_info(hdev); 27501db58f86SHuazhong Tan if (ret) 27511db58f86SHuazhong Tan return ret; 27521db58f86SHuazhong Tan 27531db58f86SHuazhong Tan ret = client->ops->init_instance(&hdev->roce); 27541db58f86SHuazhong Tan if (ret) 27551db58f86SHuazhong Tan return ret; 27561db58f86SHuazhong Tan 27571db58f86SHuazhong Tan hnae3_set_client_init_flag(client, ae_dev, 1); 27581db58f86SHuazhong Tan 27591db58f86SHuazhong Tan return 0; 27601db58f86SHuazhong Tan } 27611db58f86SHuazhong Tan
/* Dispatch client registration by type: a KNIC client also triggers RoCE
 * init if a RoCE client was registered earlier; on error the corresponding
 * client pointers are cleared so the device is left unregistered.
 */
2762e718a93fSPeng Li static int hclgevf_init_client_instance(struct hnae3_client *client, 2763e718a93fSPeng Li struct hnae3_ae_dev *ae_dev) 2764e2cb1decSSalil Mehta { 2765e718a93fSPeng Li struct hclgevf_dev *hdev = ae_dev->priv; 2766e2cb1decSSalil Mehta int ret; 2767e2cb1decSSalil Mehta 2768e2cb1decSSalil Mehta switch (client->type) { 2769e2cb1decSSalil Mehta case HNAE3_CLIENT_KNIC: 2770e2cb1decSSalil Mehta hdev->nic_client = client; 2771e2cb1decSSalil Mehta hdev->nic.client = client; 2772e2cb1decSSalil Mehta 27731db58f86SHuazhong Tan ret = hclgevf_init_nic_client_instance(ae_dev, client); 2774e2cb1decSSalil Mehta if (ret) 277549dd8054SJian Shen goto clear_nic; 2776e2cb1decSSalil Mehta 27771db58f86SHuazhong Tan ret = hclgevf_init_roce_client_instance(ae_dev, 27781db58f86SHuazhong Tan hdev->roce_client); 2779e2cb1decSSalil Mehta if (ret) 278049dd8054SJian Shen goto clear_roce; 2781d9f28fc2SJian Shen 2782e2cb1decSSalil Mehta break; 2783e2cb1decSSalil Mehta case HNAE3_CLIENT_ROCE: 2784544a7bcdSLijun Ou if (hnae3_dev_roce_supported(hdev)) { 2785e2cb1decSSalil Mehta hdev->roce_client = client; 2786e2cb1decSSalil Mehta hdev->roce.client = client; 2787544a7bcdSLijun Ou } 2788e2cb1decSSalil Mehta 27891db58f86SHuazhong Tan ret = hclgevf_init_roce_client_instance(ae_dev, client); 2790e2cb1decSSalil Mehta if (ret) 279149dd8054SJian Shen goto clear_roce; 2792e2cb1decSSalil Mehta 2793fa7a4bd5SJian Shen break; 2794fa7a4bd5SJian Shen default: 2795fa7a4bd5SJian Shen return -EINVAL; 2796e2cb1decSSalil Mehta } 2797e2cb1decSSalil Mehta 2798e2cb1decSSalil Mehta return 0; 279949dd8054SJian Shen 280049dd8054SJian Shen clear_nic: 280149dd8054SJian Shen hdev->nic_client = NULL; 280249dd8054SJian Shen hdev->nic.client = NULL; 280349dd8054SJian Shen return ret; 280449dd8054SJian Shen clear_roce: 280549dd8054SJian Shen hdev->roce_client = NULL; 280649dd8054SJian Shen hdev->roce.client = NULL; 280749dd8054SJian Shen return ret; 2808e2cb1decSSalil Mehta } 2809e2cb1decSSalil Mehta
/* Unregister client instances: RoCE first (if present), then the NIC client
 * unless this call came from the RoCE client itself.
 */
2810e718a93fSPeng Li static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2811e718a93fSPeng Li struct hnae3_ae_dev *ae_dev) 2812e2cb1decSSalil Mehta { 2813e718a93fSPeng Li struct hclgevf_dev *hdev = ae_dev->priv; 2814e718a93fSPeng Li 2815e2cb1decSSalil Mehta /* un-init roce, if it exists */ 281649dd8054SJian Shen if (hdev->roce_client) { 2817e2cb1decSSalil Mehta hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 281849dd8054SJian Shen hdev->roce_client = NULL; 281949dd8054SJian Shen hdev->roce.client = NULL; 282049dd8054SJian Shen } 2821e2cb1decSSalil Mehta 2822e2cb1decSSalil Mehta /* un-init nic/unic, if this was not called by roce client */ 282349dd8054SJian Shen if (client->ops->uninit_instance && hdev->nic_client && 282449dd8054SJian Shen client->type != HNAE3_CLIENT_ROCE) { 282525d1817cSHuazhong Tan clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 282625d1817cSHuazhong Tan 2827e2cb1decSSalil Mehta client->ops->uninit_instance(&hdev->nic, 0); 282849dd8054SJian Shen hdev->nic_client = NULL; 282949dd8054SJian Shen hdev->nic.client = NULL; 283049dd8054SJian Shen } 2831e2cb1decSSalil Mehta } 2832e2cb1decSSalil Mehta
/* Bring up the PCI device: enable it, set a 64-bit DMA mask, claim regions
 * and map BAR2 registers; unwinds via goto labels on failure.
 */
2833e2cb1decSSalil Mehta static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2834e2cb1decSSalil Mehta { 2835e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 2836e2cb1decSSalil Mehta struct hclgevf_hw *hw; 2837e2cb1decSSalil Mehta int ret; 2838e2cb1decSSalil Mehta 2839e2cb1decSSalil Mehta ret = pci_enable_device(pdev); 2840e2cb1decSSalil Mehta if (ret) { 2841e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed to enable PCI device\n"); 28423e249d3bSFuyun Liang return ret; 2843e2cb1decSSalil Mehta } 2844e2cb1decSSalil Mehta 2845e2cb1decSSalil Mehta ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2846e2cb1decSSalil Mehta if (ret) { 2847e2cb1decSSalil Mehta dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2848e2cb1decSSalil Mehta goto err_disable_device; 2849e2cb1decSSalil Mehta } 2850e2cb1decSSalil Mehta 2851e2cb1decSSalil Mehta ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2852e2cb1decSSalil Mehta if (ret) { 2853e2cb1decSSalil Mehta dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2854e2cb1decSSalil Mehta goto err_disable_device; 2855e2cb1decSSalil Mehta } 2856e2cb1decSSalil Mehta 2857e2cb1decSSalil Mehta pci_set_master(pdev); 2858e2cb1decSSalil Mehta hw = &hdev->hw; 2859e2cb1decSSalil Mehta hw->hdev = hdev; 28602e1ea493SPeng Li hw->io_base = pci_iomap(pdev, 2, 0); 2861e2cb1decSSalil Mehta if (!hw->io_base) { 2862e2cb1decSSalil Mehta dev_err(&pdev->dev, "can't map configuration register space\n"); 2863e2cb1decSSalil Mehta ret = -ENOMEM; 2864e2cb1decSSalil Mehta goto err_clr_master; 2865e2cb1decSSalil Mehta } 2866e2cb1decSSalil Mehta 2867e2cb1decSSalil Mehta return 0; 2868e2cb1decSSalil Mehta 2869e2cb1decSSalil Mehta err_clr_master: 2870e2cb1decSSalil Mehta pci_clear_master(pdev); 2871e2cb1decSSalil Mehta pci_release_regions(pdev); 2872e2cb1decSSalil Mehta err_disable_device: 2873e2cb1decSSalil Mehta pci_disable_device(pdev); 28743e249d3bSFuyun Liang
2875e2cb1decSSalil Mehta return ret; 2876e2cb1decSSalil Mehta } 2877e2cb1decSSalil Mehta
/* Reverse of hclgevf_pci_init: unmap registers, release regions, disable. */
2878e2cb1decSSalil Mehta static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2879e2cb1decSSalil Mehta { 2880e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 2881e2cb1decSSalil Mehta 2882e2cb1decSSalil Mehta pci_iounmap(pdev, hdev->hw.io_base); 2883e2cb1decSSalil Mehta pci_clear_master(pdev); 2884e2cb1decSSalil Mehta pci_release_regions(pdev); 2885e2cb1decSSalil Mehta pci_disable_device(pdev); 2886e2cb1decSSalil Mehta } 2887e2cb1decSSalil Mehta
/* Query firmware for this VF's resources (MSI-X vector counts and, when RoCE
 * is supported, the RoCE vector offset), and validate the NIC vector minimum.
 */
288807acf909SJian Shen static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 288907acf909SJian Shen { 289007acf909SJian Shen struct hclgevf_query_res_cmd *req; 289107acf909SJian Shen struct hclgevf_desc desc; 289207acf909SJian Shen int ret; 289307acf909SJian Shen 289407acf909SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 289507acf909SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 289607acf909SJian Shen if (ret) { 289707acf909SJian Shen dev_err(&hdev->pdev->dev, 289807acf909SJian Shen "query vf resource failed, ret = %d.\n", ret); 289907acf909SJian Shen return ret; 290007acf909SJian Shen } 290107acf909SJian Shen 290207acf909SJian Shen req = (struct hclgevf_query_res_cmd *)desc.data; 290307acf909SJian Shen 2904580a05f9SYonglong Liu if (hnae3_dev_roce_supported(hdev)) { 290507acf909SJian Shen hdev->roce_base_msix_offset = 290660df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 290707acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_M, 290807acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_S); 290907acf909SJian Shen hdev->num_roce_msix = 291060df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 291107acf909SJian Shen HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 291207acf909SJian Shen 2913580a05f9SYonglong Liu /* nic's msix numbers is always equals to the roce's. */ 2914580a05f9SYonglong Liu hdev->num_nic_msix = hdev->num_roce_msix; 2915580a05f9SYonglong Liu 291607acf909SJian Shen /* VF should have NIC vectors and Roce vectors, NIC vectors 291707acf909SJian Shen * are queued before Roce vectors. The offset is fixed to 64. 291807acf909SJian Shen */ 291907acf909SJian Shen hdev->num_msi = hdev->num_roce_msix + 292007acf909SJian Shen hdev->roce_base_msix_offset; 292107acf909SJian Shen } else { 292207acf909SJian Shen hdev->num_msi = 292360df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 292407acf909SJian Shen HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2925580a05f9SYonglong Liu 2926580a05f9SYonglong Liu hdev->num_nic_msix = hdev->num_msi; 2927580a05f9SYonglong Liu } 2928580a05f9SYonglong Liu 2929580a05f9SYonglong Liu if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 2930580a05f9SYonglong Liu dev_err(&hdev->pdev->dev, 2931580a05f9SYonglong Liu "Just %u msi resources, not enough for vf(min:2).\n", 2932580a05f9SYonglong Liu hdev->num_nic_msix); 2933580a05f9SYonglong Liu return -EINVAL; 293407acf909SJian Shen } 293507acf909SJian Shen 293607acf909SJian Shen return 0; 293707acf909SJian Shen } 293807acf909SJian Shen
/* Reset-time IRQ re-init: on a full VF reset tear down the misc IRQ and MSI
 * first, then (for any reset type that left IRQs uninitialized) re-enable bus
 * mastering and rebuild MSI plus the misc vector.
 */
2939862d969aSHuazhong Tan static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2940862d969aSHuazhong Tan { 2941862d969aSHuazhong Tan struct pci_dev *pdev = hdev->pdev; 2942862d969aSHuazhong Tan int ret = 0; 2943862d969aSHuazhong Tan 2944862d969aSHuazhong Tan if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2945862d969aSHuazhong Tan test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2946862d969aSHuazhong Tan hclgevf_misc_irq_uninit(hdev); 2947862d969aSHuazhong Tan hclgevf_uninit_msi(hdev); 2948862d969aSHuazhong Tan clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2949862d969aSHuazhong Tan } 2950862d969aSHuazhong Tan 2951862d969aSHuazhong Tan if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2952862d969aSHuazhong Tan pci_set_master(pdev); 2953862d969aSHuazhong Tan ret = hclgevf_init_msi(hdev); 2954862d969aSHuazhong Tan if (ret) { 2955862d969aSHuazhong Tan dev_err(&pdev->dev, 2956862d969aSHuazhong Tan "failed(%d) to init MSI/MSI-X\n", ret); 2957862d969aSHuazhong Tan return ret; 2958862d969aSHuazhong Tan } 2959862d969aSHuazhong Tan 2960862d969aSHuazhong Tan ret = hclgevf_misc_irq_init(hdev); 2961862d969aSHuazhong Tan if (ret) { 2962862d969aSHuazhong Tan hclgevf_uninit_msi(hdev); 2963862d969aSHuazhong Tan dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2964862d969aSHuazhong Tan ret); 2965862d969aSHuazhong Tan return ret; 2966862d969aSHuazhong Tan } 2967862d969aSHuazhong Tan 2968862d969aSHuazhong Tan set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2969862d969aSHuazhong Tan } 2970862d969aSHuazhong Tan 2971862d969aSHuazhong Tan return ret; 2972862d969aSHuazhong Tan } 2973862d969aSHuazhong Tan
/* Ask the PF (via mailbox, no response) to clear this VF's vport MAC table
 * list, ensuring it is empty before (re)initialization.
 */
2974039ba863SJian Shen static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 2975039ba863SJian Shen { 2976039ba863SJian Shen struct hclge_vf_to_pf_msg send_msg; 2977039ba863SJian Shen 2978039ba863SJian Shen hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 2979039ba863SJian Shen HCLGE_MBX_VPORT_LIST_CLEAR); 2980039ba863SJian Shen return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2981039ba863SJian Shen } 2982039ba863SJian Shen
/* Re-initialize the device after a reset: redo IRQs, command queue, RSS, GRO
 * and VLAN config, then flag a pending promisc-mode refresh for the service
 * task. Lighter-weight than the full hclgevf_init_hdev() path.
 */
29839c6f7085SHuazhong Tan static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2984e2cb1decSSalil Mehta { 29857a01c897SSalil Mehta struct pci_dev *pdev = hdev->pdev; 2986e2cb1decSSalil Mehta int ret; 2987e2cb1decSSalil Mehta 2988862d969aSHuazhong Tan ret = hclgevf_pci_reset(hdev); 2989862d969aSHuazhong Tan if (ret) { 2990862d969aSHuazhong Tan dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2991862d969aSHuazhong Tan return ret; 2992862d969aSHuazhong Tan } 2993862d969aSHuazhong Tan 29949c6f7085SHuazhong Tan ret = hclgevf_cmd_init(hdev); 29959c6f7085SHuazhong Tan if (ret) { 29969c6f7085SHuazhong Tan dev_err(&pdev->dev, "cmd failed %d\n", ret); 29979c6f7085SHuazhong Tan return ret; 29987a01c897SSalil Mehta } 2999e2cb1decSSalil Mehta 30009c6f7085SHuazhong Tan ret = hclgevf_rss_init_hw(hdev); 30019c6f7085SHuazhong Tan if (ret) { 30029c6f7085SHuazhong Tan dev_err(&hdev->pdev->dev, 30039c6f7085SHuazhong Tan "failed(%d) to initialize RSS\n", ret); 30049c6f7085SHuazhong Tan return ret; 30059c6f7085SHuazhong Tan } 30069c6f7085SHuazhong Tan 3007b26a6feaSPeng Li ret = hclgevf_config_gro(hdev, true); 3008b26a6feaSPeng Li if (ret) 3009b26a6feaSPeng Li return ret; 3010b26a6feaSPeng Li 30119c6f7085SHuazhong Tan ret = hclgevf_init_vlan_config(hdev); 30129c6f7085SHuazhong Tan if (ret) { 30139c6f7085SHuazhong Tan dev_err(&hdev->pdev->dev, 30149c6f7085SHuazhong Tan "failed(%d) to initialize VLAN config\n", ret); 30159c6f7085SHuazhong Tan return ret; 30169c6f7085SHuazhong Tan } 30179c6f7085SHuazhong Tan 3018c631c696SJian Shen set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3019c631c696SJian Shen 30209c6f7085SHuazhong Tan dev_info(&hdev->pdev->dev, "Reset done\n"); 30219c6f7085SHuazhong Tan 30229c6f7085SHuazhong Tan return 0; 30239c6f7085SHuazhong Tan } 30249c6f7085SHuazhong Tan
/* Full first-time device bring-up: PCI, command queue, VF resources, MSI,
 * software state, misc IRQ, configuration, TQPs, GRO, RSS, vport table and
 * VLAN; finally schedules the periodic service task. Error paths unwind in
 * reverse order via the goto labels at the bottom.
 */
30259c6f7085SHuazhong Tan static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 30269c6f7085SHuazhong Tan { 30279c6f7085SHuazhong Tan struct pci_dev *pdev = hdev->pdev; 30289c6f7085SHuazhong Tan int ret; 30299c6f7085SHuazhong Tan 3030e2cb1decSSalil Mehta ret = hclgevf_pci_init(hdev); 303160df7e91SHuazhong Tan if (ret) 3032e2cb1decSSalil Mehta return ret; 3033e2cb1decSSalil Mehta 30348b0195a3SHuazhong Tan ret = hclgevf_cmd_queue_init(hdev); 303560df7e91SHuazhong Tan if (ret) 30368b0195a3SHuazhong Tan goto err_cmd_queue_init; 30378b0195a3SHuazhong Tan 3038eddf0462SYunsheng Lin ret = hclgevf_cmd_init(hdev); 3039eddf0462SYunsheng Lin if (ret) 3040eddf0462SYunsheng Lin goto err_cmd_init; 3041eddf0462SYunsheng Lin 304207acf909SJian Shen /* Get vf resource */ 304307acf909SJian Shen ret = hclgevf_query_vf_resource(hdev); 304460df7e91SHuazhong Tan if (ret) 30458b0195a3SHuazhong Tan goto err_cmd_init; 304607acf909SJian Shen 304707acf909SJian Shen ret = hclgevf_init_msi(hdev); 304807acf909SJian Shen if (ret) { 304907acf909SJian Shen dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 30508b0195a3SHuazhong Tan goto err_cmd_init; 305107acf909SJian Shen } 305207acf909SJian Shen 305307acf909SJian Shen hclgevf_state_init(hdev); 3054dea846e8SHuazhong Tan hdev->reset_level = HNAE3_VF_FUNC_RESET; 3055afb6afdbSHuazhong Tan hdev->reset_type = HNAE3_NONE_RESET; 305607acf909SJian Shen 3057e2cb1decSSalil Mehta ret = hclgevf_misc_irq_init(hdev); 305860df7e91SHuazhong Tan if (ret) 3059e2cb1decSSalil Mehta goto err_misc_irq_init; 3060e2cb1decSSalil Mehta 3061862d969aSHuazhong Tan set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3062862d969aSHuazhong Tan 3063e2cb1decSSalil Mehta ret = hclgevf_configure(hdev); 3064e2cb1decSSalil Mehta if (ret) { 3065e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 3066e2cb1decSSalil Mehta goto err_config; 3067e2cb1decSSalil Mehta } 3068e2cb1decSSalil Mehta 3069e2cb1decSSalil Mehta ret = hclgevf_alloc_tqps(hdev); 3070e2cb1decSSalil Mehta if (ret) { 3071e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 3072e2cb1decSSalil Mehta goto err_config; 3073e2cb1decSSalil Mehta } 3074e2cb1decSSalil Mehta 3075e2cb1decSSalil Mehta ret = hclgevf_set_handle_info(hdev); 307660df7e91SHuazhong Tan if (ret) 3077e2cb1decSSalil Mehta goto err_config; 3078e2cb1decSSalil Mehta 3079b26a6feaSPeng Li ret = hclgevf_config_gro(hdev, true); 3080b26a6feaSPeng Li if (ret) 3081b26a6feaSPeng Li goto err_config; 3082b26a6feaSPeng Li 3083e2cb1decSSalil Mehta /* Initialize RSS for this VF */ 3084944de484SGuojia Liao hclgevf_rss_init_cfg(hdev); 3085e2cb1decSSalil Mehta ret = hclgevf_rss_init_hw(hdev); 3086e2cb1decSSalil Mehta if (ret) { 3087e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 3088e2cb1decSSalil Mehta "failed(%d) to initialize RSS\n", ret); 3089e2cb1decSSalil Mehta goto err_config; 3090e2cb1decSSalil Mehta } 3091e2cb1decSSalil Mehta 3092039ba863SJian Shen /* ensure vf tbl list as empty before init*/ 3093039ba863SJian Shen ret = hclgevf_clear_vport_list(hdev); 3094039ba863SJian Shen if (ret) { 3095039ba863SJian Shen dev_err(&pdev->dev, 3096039ba863SJian Shen "failed to clear tbl list configuration, ret = %d.\n", 3097039ba863SJian Shen ret); 3098039ba863SJian Shen goto err_config; 3099039ba863SJian Shen } 3100039ba863SJian Shen 3101e2cb1decSSalil Mehta ret = hclgevf_init_vlan_config(hdev); 3102e2cb1decSSalil Mehta if (ret) { 3103e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 3104e2cb1decSSalil Mehta "failed(%d) to initialize VLAN config\n", ret); 3105e2cb1decSSalil Mehta goto err_config; 3106e2cb1decSSalil Mehta } 3107e2cb1decSSalil Mehta 31080742ed7cSHuazhong Tan hdev->last_reset_time = jiffies; 310908d80a4cSHuazhong Tan dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 311008d80a4cSHuazhong Tan HCLGEVF_DRIVER_NAME); 3111e2cb1decSSalil Mehta 3112ff200099SYunsheng Lin hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 3113ff200099SYunsheng Lin 3114e2cb1decSSalil Mehta return 0; 3115e2cb1decSSalil Mehta 3116e2cb1decSSalil Mehta err_config: 3117e2cb1decSSalil Mehta hclgevf_misc_irq_uninit(hdev); 3118e2cb1decSSalil Mehta err_misc_irq_init: 3119e2cb1decSSalil Mehta hclgevf_state_uninit(hdev); 3120e2cb1decSSalil Mehta hclgevf_uninit_msi(hdev); 312107acf909SJian Shen err_cmd_init: 31228b0195a3SHuazhong Tan hclgevf_cmd_uninit(hdev); 31238b0195a3SHuazhong Tan err_cmd_queue_init: 3124e2cb1decSSalil Mehta hclgevf_pci_uninit(hdev); 3125862d969aSHuazhong Tan clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3126e2cb1decSSalil Mehta return ret; 3127e2cb1decSSalil Mehta } 3128e2cb1decSSalil Mehta
/* Full device teardown: stop software state, notify the PF via VF_UNINIT
 * mailbox, release IRQs if they were initialized, then PCI, command queue
 * and the MAC address lists.
 */
31297a01c897SSalil Mehta static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 3130e2cb1decSSalil Mehta { 3131d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 3132d3410018SYufeng Mo 3133e2cb1decSSalil Mehta hclgevf_state_uninit(hdev);
3134862d969aSHuazhong Tan 3135d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3136d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 313723b4201dSJian Shen 3138862d969aSHuazhong Tan if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3139eddf0462SYunsheng Lin hclgevf_misc_irq_uninit(hdev); 3140e2cb1decSSalil Mehta hclgevf_uninit_msi(hdev); 31417a01c897SSalil Mehta } 31427a01c897SSalil Mehta 3143e3338205SHuazhong Tan hclgevf_pci_uninit(hdev); 3144862d969aSHuazhong Tan hclgevf_cmd_uninit(hdev); 3145ee4bcd3bSJian Shen hclgevf_uninit_mac_list(hdev); 3146862d969aSHuazhong Tan } 3147862d969aSHuazhong Tan
/* hnae3 ae_dev init hook: allocate the hclgevf_dev and run full bring-up. */
31487a01c897SSalil Mehta static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 31497a01c897SSalil Mehta { 31507a01c897SSalil Mehta struct pci_dev *pdev = ae_dev->pdev; 31517a01c897SSalil Mehta int ret; 31527a01c897SSalil Mehta 31537a01c897SSalil Mehta ret = hclgevf_alloc_hdev(ae_dev); 31547a01c897SSalil Mehta if (ret) { 31557a01c897SSalil Mehta dev_err(&pdev->dev, "hclge device allocation failed\n"); 31567a01c897SSalil Mehta return ret; 31577a01c897SSalil Mehta } 31587a01c897SSalil Mehta 31597a01c897SSalil Mehta ret = hclgevf_init_hdev(ae_dev->priv); 3160a6d818e3SYunsheng Lin if (ret) { 31617a01c897SSalil Mehta dev_err(&pdev->dev, "hclge device initialization failed\n"); 31627a01c897SSalil Mehta return ret; 31637a01c897SSalil Mehta } 31647a01c897SSalil Mehta 3165a6d818e3SYunsheng Lin return 0; 3166a6d818e3SYunsheng Lin } 3167a6d818e3SYunsheng Lin
/* hnae3 ae_dev uninit hook: tear down the device and drop the priv pointer. */
31687a01c897SSalil Mehta static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 31697a01c897SSalil Mehta { 31707a01c897SSalil Mehta struct hclgevf_dev *hdev = ae_dev->priv; 31717a01c897SSalil Mehta 31727a01c897SSalil Mehta hclgevf_uninit_hdev(hdev); 3173e2cb1decSSalil Mehta ae_dev->priv = NULL; 3174e2cb1decSSalil Mehta } 3175e2cb1decSSalil Mehta
/* Max usable channels: capped by both the RSS size limit and the number of
 * TQPs available per traffic class.
 */
3176849e4607SPeng Li static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3177849e4607SPeng Li { 3178849e4607SPeng Li struct hnae3_handle *nic = &hdev->nic; 3179849e4607SPeng Li struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3180849e4607SPeng Li 31818be73621SHuazhong Tan return min_t(u32, hdev->rss_size_max, 31828be73621SHuazhong Tan hdev->num_tqps / kinfo->num_tc); 3183849e4607SPeng Li } 3184849e4607SPeng Li 3185849e4607SPeng Li /** 3186849e4607SPeng Li * hclgevf_get_channels - Get the current channels enabled and max supported. 3187849e4607SPeng Li * @handle: hardware information for network interface 3188849e4607SPeng Li * @ch: ethtool channels structure 3189849e4607SPeng Li * 3190849e4607SPeng Li * We don't support separate tx and rx queues as channels. The other count 3191849e4607SPeng Li * represents how many queues are being used for control. max_combined counts 3192849e4607SPeng Li * how many queue pairs we can support. They may not be mapped 1 to 1 with 3193849e4607SPeng Li * q_vectors since we support a lot more queue pairs than q_vectors. 3194849e4607SPeng Li **/ 3195849e4607SPeng Li static void hclgevf_get_channels(struct hnae3_handle *handle, 3196849e4607SPeng Li struct ethtool_channels *ch) 3197849e4607SPeng Li { 3198849e4607SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3199849e4607SPeng Li 3200849e4607SPeng Li ch->max_combined = hclgevf_get_max_channels(hdev); 3201849e4607SPeng Li ch->other_count = 0; 3202849e4607SPeng Li ch->max_other = 0; 32038be73621SHuazhong Tan ch->combined_count = handle->kinfo.rss_size; 3204849e4607SPeng Li } 3205849e4607SPeng Li
/* Report allocated TQP count and the maximum RSS size for this VF. */
3206cc719218SPeng Li static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 32070d43bf45SHuazhong Tan u16 *alloc_tqps, u16 *max_rss_size) 3208cc719218SPeng Li { 3209cc719218SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3210cc719218SPeng Li 32110d43bf45SHuazhong Tan *alloc_tqps = hdev->num_tqps; 3212cc719218SPeng Li *max_rss_size = hdev->rss_size_max; 3213cc719218SPeng Li } 3214cc719218SPeng Li
/* Recompute kinfo->rss_size and num_tqps from the requested TQP count,
 * honoring the user's request only when it fits within max_rss_size.
 */
32154093d1a2SGuangbin Huang static void hclgevf_update_rss_size(struct hnae3_handle *handle, 32164093d1a2SGuangbin Huang u32 new_tqps_num) 32174093d1a2SGuangbin Huang { 32184093d1a2SGuangbin Huang struct hnae3_knic_private_info *kinfo = &handle->kinfo; 32194093d1a2SGuangbin Huang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 32204093d1a2SGuangbin Huang u16 max_rss_size; 32214093d1a2SGuangbin Huang 32224093d1a2SGuangbin Huang kinfo->req_rss_size = new_tqps_num; 32234093d1a2SGuangbin Huang 32244093d1a2SGuangbin Huang max_rss_size = min_t(u16, hdev->rss_size_max, 32254093d1a2SGuangbin Huang hdev->num_tqps / kinfo->num_tc); 32264093d1a2SGuangbin Huang 32274093d1a2SGuangbin Huang /* Use the user's configuration when it is not larger than 32284093d1a2SGuangbin Huang * max_rss_size, otherwise, use the maximum specification value. 32294093d1a2SGuangbin Huang */ 32304093d1a2SGuangbin Huang if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 32314093d1a2SGuangbin Huang kinfo->req_rss_size <= max_rss_size) 32324093d1a2SGuangbin Huang kinfo->rss_size = kinfo->req_rss_size; 32334093d1a2SGuangbin Huang else if (kinfo->rss_size > max_rss_size || 32344093d1a2SGuangbin Huang (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 32354093d1a2SGuangbin Huang kinfo->rss_size = max_rss_size; 32364093d1a2SGuangbin Huang 32374093d1a2SGuangbin Huang kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; 32384093d1a2SGuangbin Huang } 32394093d1a2SGuangbin Huang
/* ethtool set-channels: update RSS size and TC mode, and (unless the user
 * configured the RSS indirection table themselves) rebuild the indirection
 * table to round-robin over the new rss_size.
 */
32404093d1a2SGuangbin Huang static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 32414093d1a2SGuangbin Huang bool rxfh_configured) 32424093d1a2SGuangbin Huang { 32434093d1a2SGuangbin Huang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 32444093d1a2SGuangbin Huang struct hnae3_knic_private_info *kinfo = &handle->kinfo; 32454093d1a2SGuangbin Huang u16 cur_rss_size = kinfo->rss_size; 32464093d1a2SGuangbin Huang u16 cur_tqps = kinfo->num_tqps; 32474093d1a2SGuangbin Huang u32 *rss_indir; 32484093d1a2SGuangbin Huang unsigned int i; 32494093d1a2SGuangbin Huang int ret; 32504093d1a2SGuangbin Huang 32514093d1a2SGuangbin Huang hclgevf_update_rss_size(handle, new_tqps_num); 32524093d1a2SGuangbin Huang 32534093d1a2SGuangbin Huang ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 32544093d1a2SGuangbin Huang if (ret) 32554093d1a2SGuangbin Huang return ret; 32564093d1a2SGuangbin Huang 32574093d1a2SGuangbin Huang /* RSS indirection table has been configuared by user */ 32584093d1a2SGuangbin Huang if (rxfh_configured) 32594093d1a2SGuangbin Huang goto out; 32604093d1a2SGuangbin Huang 32614093d1a2SGuangbin Huang /* Reinitializes the rss indirect table according to the new RSS size */ 32624093d1a2SGuangbin Huang rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 32634093d1a2SGuangbin Huang if (!rss_indir) 32644093d1a2SGuangbin Huang return -ENOMEM; 32654093d1a2SGuangbin Huang 32664093d1a2SGuangbin Huang for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 32674093d1a2SGuangbin Huang rss_indir[i] = i % kinfo->rss_size; 32684093d1a2SGuangbin Huang 3269944de484SGuojia Liao hdev->rss_cfg.rss_size = kinfo->rss_size; 3270944de484SGuojia Liao 32714093d1a2SGuangbin Huang ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 32724093d1a2SGuangbin Huang if (ret) 32734093d1a2SGuangbin Huang dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 32744093d1a2SGuangbin Huang ret); 32754093d1a2SGuangbin Huang 32764093d1a2SGuangbin Huang kfree(rss_indir); 32774093d1a2SGuangbin Huang 32784093d1a2SGuangbin Huang out: 32794093d1a2SGuangbin Huang if (!ret) 32804093d1a2SGuangbin Huang dev_info(&hdev->pdev->dev, 32814093d1a2SGuangbin Huang "Channels changed, rss_size from %u to %u, tqps from %u to %u", 32824093d1a2SGuangbin Huang cur_rss_size, kinfo->rss_size, 32834093d1a2SGuangbin Huang cur_tqps, kinfo->rss_size * kinfo->num_tc); 32844093d1a2SGuangbin Huang 32854093d1a2SGuangbin Huang return ret; 32864093d1a2SGuangbin Huang } 32874093d1a2SGuangbin Huang
/* Return the cached MAC link state. */
3288175ec96bSFuyun Liang static int hclgevf_get_status(struct hnae3_handle *handle) 3289175ec96bSFuyun Liang { 3290175ec96bSFuyun Liang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3291175ec96bSFuyun Liang 3292175ec96bSFuyun Liang return hdev->hw.mac.link; 3293175ec96bSFuyun Liang } 3294175ec96bSFuyun Liang
/* Report cached speed/duplex; VFs never autonegotiate, so auto_neg is
 * always AUTONEG_DISABLE. NULL out-parameters are skipped.
 */
32954a152de9SFuyun Liang static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 32964a152de9SFuyun Liang u8 *auto_neg, u32 *speed, 32974a152de9SFuyun Liang u8 *duplex) 32984a152de9SFuyun Liang { 32994a152de9SFuyun Liang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33004a152de9SFuyun Liang 33014a152de9SFuyun Liang if (speed) 33024a152de9SFuyun Liang *speed = hdev->hw.mac.speed; 33034a152de9SFuyun Liang if (duplex) 33044a152de9SFuyun Liang *duplex = hdev->hw.mac.duplex; 33054a152de9SFuyun Liang if (auto_neg) 33064a152de9SFuyun Liang *auto_neg = AUTONEG_DISABLE; 33074a152de9SFuyun Liang } 33084a152de9SFuyun Liang
/* Cache the speed/duplex values reported by the PF. */
33094a152de9SFuyun Liang void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 33104a152de9SFuyun Liang u8 duplex) 33114a152de9SFuyun Liang { 33124a152de9SFuyun Liang hdev->hw.mac.speed = speed; 33134a152de9SFuyun Liang hdev->hw.mac.duplex = duplex; 33144a152de9SFuyun Liang } 33154a152de9SFuyun Liang
/* Enable/disable hardware GRO for this VF. */
33161731be4cSYonglong Liu static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 33175c9f6b39SPeng Li { 33185c9f6b39SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33195c9f6b39SPeng Li 33205c9f6b39SPeng Li return hclgevf_config_gro(hdev, enable); 33215c9f6b39SPeng Li } 33225c9f6b39SPeng Li 332388d10bd6SJian Shen static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 332488d10bd6SJian Shen u8 *module_type) 3325c136b884SPeng Li { 3326c136b884SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 332788d10bd6SJian Shen 3328c136b884SPeng Li if (media_type) 3329c136b884SPeng Li *media_type = hdev->hw.mac.media_type; 333088d10bd6SJian Shen 333188d10bd6SJian Shen if (module_type)
333288d10bd6SJian Shen *module_type = hdev->hw.mac.module_type; 3333c136b884SPeng Li } 3334c136b884SPeng Li 33354d60291bSHuazhong Tan static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 33364d60291bSHuazhong Tan { 33374d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33384d60291bSHuazhong Tan 3339aa5c4f17SHuazhong Tan return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 33404d60291bSHuazhong Tan } 33414d60291bSHuazhong Tan 33424d60291bSHuazhong Tan static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 33434d60291bSHuazhong Tan { 33444d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33454d60291bSHuazhong Tan 33464d60291bSHuazhong Tan return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 33474d60291bSHuazhong Tan } 33484d60291bSHuazhong Tan 33494d60291bSHuazhong Tan static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 33504d60291bSHuazhong Tan { 33514d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33524d60291bSHuazhong Tan 3353c88a6e7dSHuazhong Tan return hdev->rst_stats.hw_rst_done_cnt; 33544d60291bSHuazhong Tan } 33554d60291bSHuazhong Tan 33569194d18bSliuzhongzhu static void hclgevf_get_link_mode(struct hnae3_handle *handle, 33579194d18bSliuzhongzhu unsigned long *supported, 33589194d18bSliuzhongzhu unsigned long *advertising) 33599194d18bSliuzhongzhu { 33609194d18bSliuzhongzhu struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33619194d18bSliuzhongzhu 33629194d18bSliuzhongzhu *supported = hdev->hw.mac.supported; 33639194d18bSliuzhongzhu *advertising = hdev->hw.mac.advertising; 33649194d18bSliuzhongzhu } 33659194d18bSliuzhongzhu 33661600c3e5SJian Shen #define MAX_SEPARATE_NUM 4 33671600c3e5SJian Shen #define SEPARATOR_VALUE 0xFFFFFFFF 33681600c3e5SJian Shen #define REG_NUM_PER_LINE 4 33691600c3e5SJian Shen #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 33701600c3e5SJian Shen 33711600c3e5SJian Shen static int 
hclgevf_get_regs_len(struct hnae3_handle *handle) 33721600c3e5SJian Shen { 33731600c3e5SJian Shen int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 33741600c3e5SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33751600c3e5SJian Shen 33761600c3e5SJian Shen cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 33771600c3e5SJian Shen common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 33781600c3e5SJian Shen ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 33791600c3e5SJian Shen tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 33801600c3e5SJian Shen 33811600c3e5SJian Shen return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 33821600c3e5SJian Shen tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 33831600c3e5SJian Shen } 33841600c3e5SJian Shen 33851600c3e5SJian Shen static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 33861600c3e5SJian Shen void *data) 33871600c3e5SJian Shen { 33881600c3e5SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 33891600c3e5SJian Shen int i, j, reg_um, separator_num; 33901600c3e5SJian Shen u32 *reg = data; 33911600c3e5SJian Shen 33921600c3e5SJian Shen *version = hdev->fw_version; 33931600c3e5SJian Shen 33941600c3e5SJian Shen /* fetching per-VF registers values from VF PCIe register space */ 33951600c3e5SJian Shen reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 33961600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 33971600c3e5SJian Shen for (i = 0; i < reg_um; i++) 33981600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 33991600c3e5SJian Shen for (i = 0; i < separator_num; i++) 34001600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 34011600c3e5SJian Shen 34021600c3e5SJian Shen reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 34031600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 34041600c3e5SJian Shen for (i 
= 0; i < reg_um; i++) 34051600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 34061600c3e5SJian Shen for (i = 0; i < separator_num; i++) 34071600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 34081600c3e5SJian Shen 34091600c3e5SJian Shen reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 34101600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 34111600c3e5SJian Shen for (j = 0; j < hdev->num_tqps; j++) { 34121600c3e5SJian Shen for (i = 0; i < reg_um; i++) 34131600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, 34141600c3e5SJian Shen ring_reg_addr_list[i] + 34151600c3e5SJian Shen 0x200 * j); 34161600c3e5SJian Shen for (i = 0; i < separator_num; i++) 34171600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 34181600c3e5SJian Shen } 34191600c3e5SJian Shen 34201600c3e5SJian Shen reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 34211600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 34221600c3e5SJian Shen for (j = 0; j < hdev->num_msi_used - 1; j++) { 34231600c3e5SJian Shen for (i = 0; i < reg_um; i++) 34241600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, 34251600c3e5SJian Shen tqp_intr_reg_addr_list[i] + 34261600c3e5SJian Shen 4 * j); 34271600c3e5SJian Shen for (i = 0; i < separator_num; i++) 34281600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 34291600c3e5SJian Shen } 34301600c3e5SJian Shen } 34311600c3e5SJian Shen 343292f11ea1SJian Shen void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 343392f11ea1SJian Shen u8 *port_base_vlan_info, u8 data_size) 343492f11ea1SJian Shen { 343592f11ea1SJian Shen struct hnae3_handle *nic = &hdev->nic; 3436d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 343792f11ea1SJian Shen 343892f11ea1SJian Shen rtnl_lock(); 343992f11ea1SJian Shen hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 344092f11ea1SJian Shen rtnl_unlock(); 344192f11ea1SJian Shen 344292f11ea1SJian Shen /* send msg to PF and wait update port based vlan info */ 
3443d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3444d3410018SYufeng Mo HCLGE_MBX_PORT_BASE_VLAN_CFG); 3445d3410018SYufeng Mo memcpy(send_msg.data, port_base_vlan_info, data_size); 3446d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 344792f11ea1SJian Shen 344892f11ea1SJian Shen if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 344992f11ea1SJian Shen nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; 345092f11ea1SJian Shen else 345192f11ea1SJian Shen nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 345292f11ea1SJian Shen 345392f11ea1SJian Shen rtnl_lock(); 345492f11ea1SJian Shen hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 345592f11ea1SJian Shen rtnl_unlock(); 345692f11ea1SJian Shen } 345792f11ea1SJian Shen 3458e2cb1decSSalil Mehta static const struct hnae3_ae_ops hclgevf_ops = { 3459e2cb1decSSalil Mehta .init_ae_dev = hclgevf_init_ae_dev, 3460e2cb1decSSalil Mehta .uninit_ae_dev = hclgevf_uninit_ae_dev, 34616ff3cf07SHuazhong Tan .flr_prepare = hclgevf_flr_prepare, 34626ff3cf07SHuazhong Tan .flr_done = hclgevf_flr_done, 3463e718a93fSPeng Li .init_client_instance = hclgevf_init_client_instance, 3464e718a93fSPeng Li .uninit_client_instance = hclgevf_uninit_client_instance, 3465e2cb1decSSalil Mehta .start = hclgevf_ae_start, 3466e2cb1decSSalil Mehta .stop = hclgevf_ae_stop, 3467a6d818e3SYunsheng Lin .client_start = hclgevf_client_start, 3468a6d818e3SYunsheng Lin .client_stop = hclgevf_client_stop, 3469e2cb1decSSalil Mehta .map_ring_to_vector = hclgevf_map_ring_to_vector, 3470e2cb1decSSalil Mehta .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3471e2cb1decSSalil Mehta .get_vector = hclgevf_get_vector, 34720d3e6631SYunsheng Lin .put_vector = hclgevf_put_vector, 3473e2cb1decSSalil Mehta .reset_queue = hclgevf_reset_tqp, 3474e2cb1decSSalil Mehta .get_mac_addr = hclgevf_get_mac_addr, 3475e2cb1decSSalil Mehta .set_mac_addr = hclgevf_set_mac_addr, 3476e2cb1decSSalil Mehta .add_uc_addr = 
hclgevf_add_uc_addr, 3477e2cb1decSSalil Mehta .rm_uc_addr = hclgevf_rm_uc_addr, 3478e2cb1decSSalil Mehta .add_mc_addr = hclgevf_add_mc_addr, 3479e2cb1decSSalil Mehta .rm_mc_addr = hclgevf_rm_mc_addr, 3480e2cb1decSSalil Mehta .get_stats = hclgevf_get_stats, 3481e2cb1decSSalil Mehta .update_stats = hclgevf_update_stats, 3482e2cb1decSSalil Mehta .get_strings = hclgevf_get_strings, 3483e2cb1decSSalil Mehta .get_sset_count = hclgevf_get_sset_count, 3484e2cb1decSSalil Mehta .get_rss_key_size = hclgevf_get_rss_key_size, 3485e2cb1decSSalil Mehta .get_rss_indir_size = hclgevf_get_rss_indir_size, 3486e2cb1decSSalil Mehta .get_rss = hclgevf_get_rss, 3487e2cb1decSSalil Mehta .set_rss = hclgevf_set_rss, 3488d97b3072SJian Shen .get_rss_tuple = hclgevf_get_rss_tuple, 3489d97b3072SJian Shen .set_rss_tuple = hclgevf_set_rss_tuple, 3490e2cb1decSSalil Mehta .get_tc_size = hclgevf_get_tc_size, 3491e2cb1decSSalil Mehta .get_fw_version = hclgevf_get_fw_version, 3492e2cb1decSSalil Mehta .set_vlan_filter = hclgevf_set_vlan_filter, 3493b2641e2aSYunsheng Lin .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 34946d4c3981SSalil Mehta .reset_event = hclgevf_reset_event, 3495720bd583SHuazhong Tan .set_default_reset_request = hclgevf_set_def_reset_request, 34964093d1a2SGuangbin Huang .set_channels = hclgevf_set_channels, 3497849e4607SPeng Li .get_channels = hclgevf_get_channels, 3498cc719218SPeng Li .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 34991600c3e5SJian Shen .get_regs_len = hclgevf_get_regs_len, 35001600c3e5SJian Shen .get_regs = hclgevf_get_regs, 3501175ec96bSFuyun Liang .get_status = hclgevf_get_status, 35024a152de9SFuyun Liang .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3503c136b884SPeng Li .get_media_type = hclgevf_get_media_type, 35044d60291bSHuazhong Tan .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 35054d60291bSHuazhong Tan .ae_dev_resetting = hclgevf_ae_dev_resetting, 35064d60291bSHuazhong Tan .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 
35075c9f6b39SPeng Li .set_gro_en = hclgevf_gro_en, 3508818f1675SYunsheng Lin .set_mtu = hclgevf_set_mtu, 35090c29d191Sliuzhongzhu .get_global_queue_id = hclgevf_get_qid_global, 35108cdb992fSJian Shen .set_timer_task = hclgevf_set_timer_task, 35119194d18bSliuzhongzhu .get_link_mode = hclgevf_get_link_mode, 3512e196ec75SJian Shen .set_promisc_mode = hclgevf_set_promisc_mode, 3513c631c696SJian Shen .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3514e2cb1decSSalil Mehta }; 3515e2cb1decSSalil Mehta 3516e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf = { 3517e2cb1decSSalil Mehta .ops = &hclgevf_ops, 3518e2cb1decSSalil Mehta .pdev_id_table = ae_algovf_pci_tbl, 3519e2cb1decSSalil Mehta }; 3520e2cb1decSSalil Mehta 3521e2cb1decSSalil Mehta static int hclgevf_init(void) 3522e2cb1decSSalil Mehta { 3523e2cb1decSSalil Mehta pr_info("%s is initializing\n", HCLGEVF_NAME); 3524e2cb1decSSalil Mehta 352516deaef2SYunsheng Lin hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); 35260ea68902SYunsheng Lin if (!hclgevf_wq) { 35270ea68902SYunsheng Lin pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 35280ea68902SYunsheng Lin return -ENOMEM; 35290ea68902SYunsheng Lin } 35300ea68902SYunsheng Lin 3531854cf33aSFuyun Liang hnae3_register_ae_algo(&ae_algovf); 3532854cf33aSFuyun Liang 3533854cf33aSFuyun Liang return 0; 3534e2cb1decSSalil Mehta } 3535e2cb1decSSalil Mehta 3536e2cb1decSSalil Mehta static void hclgevf_exit(void) 3537e2cb1decSSalil Mehta { 3538e2cb1decSSalil Mehta hnae3_unregister_ae_algo(&ae_algovf); 35390ea68902SYunsheng Lin destroy_workqueue(hclgevf_wq); 3540e2cb1decSSalil Mehta } 3541e2cb1decSSalil Mehta module_init(hclgevf_init); 3542e2cb1decSSalil Mehta module_exit(hclgevf_exit); 3543e2cb1decSSalil Mehta 3544e2cb1decSSalil Mehta MODULE_LICENSE("GPL"); 3545e2cb1decSSalil Mehta MODULE_AUTHOR("Huawei Tech. 
Co., Ltd."); 3546e2cb1decSSalil Mehta MODULE_DESCRIPTION("HCLGEVF Driver"); 3547e2cb1decSSalil Mehta MODULE_VERSION(HCLGEVF_MOD_VERSION); 3548