// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"

#define HCLGEVF_NAME	"hclgevf"

/* give up after this many consecutive reset failures */
#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

/* driver-private workqueue shared by all VF instances for service tasks */
static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

/* default RSS hash key programmed into hardware (40 bytes) */
static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

/* register address tables below enumerate the registers dumped for
 * diagnostics (ethtool "get regs" style dumps): command queue,
 * common/misc, per-ring and per-TQP interrupt registers respectively.
 */
static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
					 HCLGE_COMM_CMDQ_INTR_EN_REG,
					 HCLGE_COMM_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num : the number of descriptors to be sent
 *
 * This is the main send command for command queue, it
 * sends the queue, cleans the queue, etc
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
	/* thin wrapper over the PF/VF-shared command-queue implementation */
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

/* Reset the async receive queue (mailbox ARQ) bookkeeping to empty.
 * Serialized against mailbox IRQ handling via the CRQ spinlock.
 */
void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
	struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;

	spin_lock(&cmdq->crq.lock);
	/* initialize the pointers of async rx queue of mailbox */
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	spin_unlock(&cmdq->crq.lock);
}

/* Map an hnae3 handle back to its owning hclgevf_dev.  The handle may be
 * either the embedded nic or roce handle; a NULL client means the handle
 * is not yet bound and is treated as the nic handle.
 */
static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

/* Query per-queue RX/TX packet counters from firmware and accumulate them
 * into each tqp's software stats.  Returns 0 or the first command error.
 */
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		/* queue index occupies the low 9 bits of data[0] */
		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

/* Copy cached per-queue stats into @data: all TX counters first, then all
 * RX counters (must match the string order in hclgevf_tqps_get_strings).
 * Returns the advanced buffer pointer.
 */
static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

/* One TX and one RX counter per queue; @strset is unused here because the
 * caller has already dispatched on the string-set type.
 */
static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}
/* Emit ethtool stat name strings: "txq<i>_pktnum_rcd" for every queue,
 * then "rxq<i>_pktnum_rcd" (order must match hclgevf_tqps_get_stats).
 * Returns the advanced buffer pointer.
 */
static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

/* hnae3 ops hook: refresh TQP stats from firmware; @net_stats is unused
 * here (the stack reads the cached counters elsewhere).
 */
static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

/* ethtool sset-count hook: no self-test support on the VF; only the
 * stats string set is populated.
 */
static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

/* ethtool get-strings hook; only ETH_SS_STATS is supported. */
static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

/* ethtool get-stats hook: dump cached per-queue counters into @data. */
static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

/* Zero a VF->PF mailbox message and fill in its code/subcode.
 * Tolerates a NULL @msg so callers need not pre-check.
 */
static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}

/* Fetch basic capabilities from the PF over mailbox: hardware TC map,
 * mailbox API version and PF capability bits.
 */
static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
	struct hclge_basic_info *basic_info;
	struct hclge_vf_to_pf_msg send_msg;
	unsigned long caps;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"failed to get basic info from pf, ret = %d", status);
		return status;
	}

	/* response payload is laid out as struct hclge_basic_info */
	basic_info = (struct hclge_basic_info *)resp_msg;

	hdev->hw_tc_map = basic_info->hw_tc_map;
	hdev->mbx_api_version = basic_info->mbx_api_version;
	caps = basic_info->pf_caps;
	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	return 0;
}

/* Ask the PF for the current port-based VLAN state and mirror it into
 * the nic handle.
 */
static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

/* Query queue allocation from the PF.  The 6-byte response packs three
 * little-endian u16 fields at the offsets #defined below.
 */
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}

/* Query default TX/RX ring depths from the PF (two u16 fields). */
static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}

/* Translate a VF-local queue id to the global (PF) queue id via mailbox.
 * Returns 0 on mailbox failure (best-effort; callers treat it as a hint).
 */
static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

/* Fetch the PF port's media and module type (one byte each). */
static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

/* Allocate the per-queue (tqp) array and compute each queue's MMIO base.
 * Uses devm allocation, so no explicit free path is needed.
 * Returns 0 or -ENOMEM.
 */
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

/* Populate the knic private info: ring depths, TC count derived from the
 * hardware TC map, rss_size / num_tqps sized to both the queue budget and
 * the available NIC MSI-X vectors.  Returns 0 or -ENOMEM.
 */
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;
	u8 num_tc = 0;

	kinfo = &nic->kinfo;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			num_tc++;

	/* at least one TC even if the map is empty */
	num_tc = num_tc ? num_tc : 1;
	kinfo->tc_info.num_tc = num_tc;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
	new_tqps = kinfo->rss_size * num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after init the max rss_size and tqps, adjust the default tqp numbers
	 * and rss size with the actual vector numbers
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
				kinfo->rss_size);

	return 0;
}

/* Fire-and-forget mailbox request asking the PF to report link status;
 * the reply arrives asynchronously via the mailbox handler.
 */
static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}
521e2cb1decSSalil Mehta void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 522e2cb1decSSalil Mehta { 52345e92b7eSPeng Li struct hnae3_handle *rhandle = &hdev->roce; 524e2cb1decSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 52545e92b7eSPeng Li struct hnae3_client *rclient; 526e2cb1decSSalil Mehta struct hnae3_client *client; 527e2cb1decSSalil Mehta 528ff200099SYunsheng Lin if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 529ff200099SYunsheng Lin return; 530ff200099SYunsheng Lin 531e2cb1decSSalil Mehta client = handle->client; 53245e92b7eSPeng Li rclient = hdev->roce_client; 533e2cb1decSSalil Mehta 534582d37bbSPeng Li link_state = 535582d37bbSPeng Li test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 536e2cb1decSSalil Mehta if (link_state != hdev->hw.mac.link) { 537b15c072aSYonglong Liu hdev->hw.mac.link = link_state; 538e2cb1decSSalil Mehta client->ops->link_status_change(handle, !!link_state); 53945e92b7eSPeng Li if (rclient && rclient->ops->link_status_change) 54045e92b7eSPeng Li rclient->ops->link_status_change(rhandle, !!link_state); 541e2cb1decSSalil Mehta } 542ff200099SYunsheng Lin 543ff200099SYunsheng Lin clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 544e2cb1decSSalil Mehta } 545e2cb1decSSalil Mehta 546538abaf3SYueHaibing static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 5479194d18bSliuzhongzhu { 5489194d18bSliuzhongzhu #define HCLGEVF_ADVERTISING 0 5499194d18bSliuzhongzhu #define HCLGEVF_SUPPORTED 1 5509194d18bSliuzhongzhu 551d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 552d3410018SYufeng Mo 553d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 554d3410018SYufeng Mo send_msg.data[0] = HCLGEVF_ADVERTISING; 555d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 556d3410018SYufeng Mo send_msg.data[0] = HCLGEVF_SUPPORTED; 557d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 5589194d18bSliuzhongzhu } 

/* Wire up the nic hnae3 handle (algo, pdev, NUMA mask, VF flag, MMIO base)
 * and build its knic private info.  Returns 0 or a knic-setup error.
 */
static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

/* Return an MSI-X vector to the free pool; warns (and is a no-op) on a
 * double free.
 */
static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

/* Allocate up to @vector_num MSI-X vectors for the client, filling
 * @vector_info with irq number and per-vector MMIO address.  The request
 * is clamped by both the NIC vector budget and the free-vector count.
 * Returns the number of vectors actually allocated (may be < requested).
 */
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		/* vector 0 is the misc vector; scan the rest for a free slot */
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

/* Reverse-map an irq number to its vector index, or -EINVAL. */
static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

/* Program the RSS hash algorithm and hash key into hardware.  The key is
 * split across several command descriptors of HCLGEVF_RSS_HASH_KEY_NUM
 * bytes each, identified by key_offset in the descriptor.
 */
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclge_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

/* Push the software RSS indirection table to hardware, one command
 * descriptor per HCLGEVF_RSS_CFG_TBL_SIZE entries.
 */
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u16 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int rss_cfg_tbl_num;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
			  HCLGEVF_RSS_CFG_TBL_SIZE;

	for (i = 0; i < rss_cfg_tbl_num; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index =
			cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

/* Configure per-TC RSS mode: each TC gets a size of the next power of two
 * >= @rss_size (encoded as log2) and an offset of rss_size * tc when that
 * TC is present in the hardware TC map.
 */
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclge_desc desc;
	u16 roundup_size;
	unsigned int i;
	int status;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	/* hardware takes the TC size as log2 of a power of two */
	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		/* size MSB lives in a separate bit of the mode word */
		hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
			      tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
			      0x1);
		hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

/* for revision 0x20, vf shared the same rss config with pf */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;
768d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 769a638b1d8SJian Shen msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 770a638b1d8SJian Shen HCLGEVF_RSS_MBX_RESP_LEN; 771a638b1d8SJian Shen for (index = 0; index < msg_num; index++) { 772d3410018SYufeng Mo send_msg.data[0] = index; 773d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 774a638b1d8SJian Shen HCLGEVF_RSS_MBX_RESP_LEN); 775a638b1d8SJian Shen if (ret) { 776a638b1d8SJian Shen dev_err(&hdev->pdev->dev, 777a638b1d8SJian Shen "VF get rss hash key from PF failed, ret=%d", 778a638b1d8SJian Shen ret); 779a638b1d8SJian Shen return ret; 780a638b1d8SJian Shen } 781a638b1d8SJian Shen 782a638b1d8SJian Shen hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 783a638b1d8SJian Shen if (index == msg_num - 1) 784a638b1d8SJian Shen memcpy(&rss_cfg->rss_hash_key[hash_key_index], 785a638b1d8SJian Shen &resp_msg[0], 786a638b1d8SJian Shen HCLGEVF_RSS_KEY_SIZE - hash_key_index); 787a638b1d8SJian Shen else 788a638b1d8SJian Shen memcpy(&rss_cfg->rss_hash_key[hash_key_index], 789a638b1d8SJian Shen &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 790a638b1d8SJian Shen } 791a638b1d8SJian Shen 792a638b1d8SJian Shen return 0; 793a638b1d8SJian Shen } 794a638b1d8SJian Shen 795e2cb1decSSalil Mehta static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 796e2cb1decSSalil Mehta u8 *hfunc) 797e2cb1decSSalil Mehta { 798e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 799*027733b1SJie Wang struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 800a638b1d8SJian Shen int i, ret; 801e2cb1decSSalil Mehta 802295ba232SGuangbin Huang if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 803374ad291SJian Shen /* Get hash algorithm */ 804374ad291SJian Shen if (hfunc) { 805*027733b1SJie Wang switch (rss_cfg->rss_algo) { 806374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 807374ad291SJian Shen *hfunc = ETH_RSS_HASH_TOP; 
808374ad291SJian Shen break; 809374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 810374ad291SJian Shen *hfunc = ETH_RSS_HASH_XOR; 811374ad291SJian Shen break; 812374ad291SJian Shen default: 813374ad291SJian Shen *hfunc = ETH_RSS_HASH_UNKNOWN; 814374ad291SJian Shen break; 815374ad291SJian Shen } 816374ad291SJian Shen } 817374ad291SJian Shen 818374ad291SJian Shen /* Get the RSS Key required by the user */ 819374ad291SJian Shen if (key) 820374ad291SJian Shen memcpy(key, rss_cfg->rss_hash_key, 821374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 822a638b1d8SJian Shen } else { 823a638b1d8SJian Shen if (hfunc) 824a638b1d8SJian Shen *hfunc = ETH_RSS_HASH_TOP; 825a638b1d8SJian Shen if (key) { 826a638b1d8SJian Shen ret = hclgevf_get_rss_hash_key(hdev); 827a638b1d8SJian Shen if (ret) 828a638b1d8SJian Shen return ret; 829a638b1d8SJian Shen memcpy(key, rss_cfg->rss_hash_key, 830a638b1d8SJian Shen HCLGEVF_RSS_KEY_SIZE); 831a638b1d8SJian Shen } 832374ad291SJian Shen } 833374ad291SJian Shen 834e2cb1decSSalil Mehta if (indir) 83587ce161eSGuangbin Huang for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 836e2cb1decSSalil Mehta indir[i] = rss_cfg->rss_indirection_tbl[i]; 837e2cb1decSSalil Mehta 838374ad291SJian Shen return 0; 839e2cb1decSSalil Mehta } 840e2cb1decSSalil Mehta 841e2cb1decSSalil Mehta static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 842e2cb1decSSalil Mehta const u8 *key, const u8 hfunc) 843e2cb1decSSalil Mehta { 844e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 845*027733b1SJie Wang struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 846e184cec5SJian Shen u8 hash_algo; 847374ad291SJian Shen int ret, i; 848374ad291SJian Shen 849295ba232SGuangbin Huang if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 850*027733b1SJie Wang ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo); 851374ad291SJian Shen if (ret) 852374ad291SJian Shen return ret; 853374ad291SJian Shen 854e184cec5SJian Shen /* Set 
the RSS Hash Key if specififed by the user */ 855e184cec5SJian Shen if (key) { 856e184cec5SJian Shen ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key); 857e184cec5SJian Shen if (ret) { 858e184cec5SJian Shen dev_err(&hdev->pdev->dev, 859e184cec5SJian Shen "invalid hfunc type %u\n", hfunc); 860e184cec5SJian Shen return ret; 861e184cec5SJian Shen } 862e184cec5SJian Shen 863374ad291SJian Shen /* Update the shadow RSS key with user specified qids */ 864374ad291SJian Shen memcpy(rss_cfg->rss_hash_key, key, 865374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 866e184cec5SJian Shen } else { 867e184cec5SJian Shen ret = hclgevf_set_rss_algo_key(hdev, hash_algo, 868e184cec5SJian Shen rss_cfg->rss_hash_key); 869e184cec5SJian Shen if (ret) 870e184cec5SJian Shen return ret; 871374ad291SJian Shen } 872*027733b1SJie Wang rss_cfg->rss_algo = hash_algo; 873374ad291SJian Shen } 874e2cb1decSSalil Mehta 875e2cb1decSSalil Mehta /* update the shadow RSS table with user specified qids */ 87687ce161eSGuangbin Huang for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 877e2cb1decSSalil Mehta rss_cfg->rss_indirection_tbl[i] = indir[i]; 878e2cb1decSSalil Mehta 879e2cb1decSSalil Mehta /* update the hardware */ 880e2cb1decSSalil Mehta return hclgevf_set_rss_indir_table(hdev); 881e2cb1decSSalil Mehta } 882e2cb1decSSalil Mehta 883d97b3072SJian Shen static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 884d97b3072SJian Shen { 885d97b3072SJian Shen u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGEVF_S_PORT_BIT : 0; 886d97b3072SJian Shen 887d97b3072SJian Shen if (nfc->data & RXH_L4_B_2_3) 888d97b3072SJian Shen hash_sets |= HCLGEVF_D_PORT_BIT; 889d97b3072SJian Shen else 890d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_PORT_BIT; 891d97b3072SJian Shen 892d97b3072SJian Shen if (nfc->data & RXH_IP_SRC) 893d97b3072SJian Shen hash_sets |= HCLGEVF_S_IP_BIT; 894d97b3072SJian Shen else 895d97b3072SJian Shen hash_sets &= ~HCLGEVF_S_IP_BIT; 896d97b3072SJian Shen 897d97b3072SJian Shen if (nfc->data & RXH_IP_DST) 898d97b3072SJian Shen hash_sets |= HCLGEVF_D_IP_BIT; 899d97b3072SJian Shen else 900d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_IP_BIT; 901d97b3072SJian Shen 902d97b3072SJian Shen if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 903d97b3072SJian Shen hash_sets |= HCLGEVF_V_TAG_BIT; 904d97b3072SJian Shen 905d97b3072SJian Shen return hash_sets; 906d97b3072SJian Shen } 907d97b3072SJian Shen 9085fd0e7b4SHuazhong Tan static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle, 9095fd0e7b4SHuazhong Tan struct ethtool_rxnfc *nfc, 9105fd0e7b4SHuazhong Tan struct hclgevf_rss_input_tuple_cmd *req) 911d97b3072SJian Shen { 912d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 913*027733b1SJie Wang struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 914d97b3072SJian Shen u8 tuple_sets; 915d97b3072SJian Shen 916d97b3072SJian Shen req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 917d97b3072SJian Shen req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 918d97b3072SJian Shen req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 919d97b3072SJian Shen req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 920d97b3072SJian Shen req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 921d97b3072SJian Shen req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 922d97b3072SJian Shen req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 923d97b3072SJian Shen req->ipv6_fragment_en = 
rss_cfg->rss_tuple_sets.ipv6_fragment_en; 924d97b3072SJian Shen 925d97b3072SJian Shen tuple_sets = hclgevf_get_rss_hash_bits(nfc); 926d97b3072SJian Shen switch (nfc->flow_type) { 927d97b3072SJian Shen case TCP_V4_FLOW: 928d97b3072SJian Shen req->ipv4_tcp_en = tuple_sets; 929d97b3072SJian Shen break; 930d97b3072SJian Shen case TCP_V6_FLOW: 931d97b3072SJian Shen req->ipv6_tcp_en = tuple_sets; 932d97b3072SJian Shen break; 933d97b3072SJian Shen case UDP_V4_FLOW: 934d97b3072SJian Shen req->ipv4_udp_en = tuple_sets; 935d97b3072SJian Shen break; 936d97b3072SJian Shen case UDP_V6_FLOW: 937d97b3072SJian Shen req->ipv6_udp_en = tuple_sets; 938d97b3072SJian Shen break; 939d97b3072SJian Shen case SCTP_V4_FLOW: 940d97b3072SJian Shen req->ipv4_sctp_en = tuple_sets; 941d97b3072SJian Shen break; 942d97b3072SJian Shen case SCTP_V6_FLOW: 943ab6e32d2SJian Shen if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && 944ab6e32d2SJian Shen (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))) 945d97b3072SJian Shen return -EINVAL; 946d97b3072SJian Shen 947d97b3072SJian Shen req->ipv6_sctp_en = tuple_sets; 948d97b3072SJian Shen break; 949d97b3072SJian Shen case IPV4_FLOW: 950d97b3072SJian Shen req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 951d97b3072SJian Shen break; 952d97b3072SJian Shen case IPV6_FLOW: 953d97b3072SJian Shen req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 954d97b3072SJian Shen break; 955d97b3072SJian Shen default: 956d97b3072SJian Shen return -EINVAL; 957d97b3072SJian Shen } 958d97b3072SJian Shen 9595fd0e7b4SHuazhong Tan return 0; 9605fd0e7b4SHuazhong Tan } 9615fd0e7b4SHuazhong Tan 9625fd0e7b4SHuazhong Tan static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 9635fd0e7b4SHuazhong Tan struct ethtool_rxnfc *nfc) 9645fd0e7b4SHuazhong Tan { 9655fd0e7b4SHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 966*027733b1SJie Wang struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 9675fd0e7b4SHuazhong Tan struct hclgevf_rss_input_tuple_cmd 
*req; 9686befad60SJie Wang struct hclge_desc desc; 9695fd0e7b4SHuazhong Tan int ret; 9705fd0e7b4SHuazhong Tan 9715fd0e7b4SHuazhong Tan if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 9725fd0e7b4SHuazhong Tan return -EOPNOTSUPP; 9735fd0e7b4SHuazhong Tan 9745fd0e7b4SHuazhong Tan if (nfc->data & 9755fd0e7b4SHuazhong Tan ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 9765fd0e7b4SHuazhong Tan return -EINVAL; 9775fd0e7b4SHuazhong Tan 9785fd0e7b4SHuazhong Tan req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 9795fd0e7b4SHuazhong Tan hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 9805fd0e7b4SHuazhong Tan 9815fd0e7b4SHuazhong Tan ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req); 9825fd0e7b4SHuazhong Tan if (ret) { 9835fd0e7b4SHuazhong Tan dev_err(&hdev->pdev->dev, 9845fd0e7b4SHuazhong Tan "failed to init rss tuple cmd, ret = %d\n", ret); 9855fd0e7b4SHuazhong Tan return ret; 9865fd0e7b4SHuazhong Tan } 9875fd0e7b4SHuazhong Tan 988d97b3072SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 989d97b3072SJian Shen if (ret) { 990d97b3072SJian Shen dev_err(&hdev->pdev->dev, 991d97b3072SJian Shen "Set rss tuple fail, status = %d\n", ret); 992d97b3072SJian Shen return ret; 993d97b3072SJian Shen } 994d97b3072SJian Shen 995d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 996d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 997d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 998d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 999d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 1000d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 1001d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 1002d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 1003d97b3072SJian Shen return 0; 1004d97b3072SJian Shen } 
1005d97b3072SJian Shen 100673f7767eSJian Shen static u64 hclgevf_convert_rss_tuple(u8 tuple_sets) 100773f7767eSJian Shen { 100873f7767eSJian Shen u64 tuple_data = 0; 100973f7767eSJian Shen 101073f7767eSJian Shen if (tuple_sets & HCLGEVF_D_PORT_BIT) 101173f7767eSJian Shen tuple_data |= RXH_L4_B_2_3; 101273f7767eSJian Shen if (tuple_sets & HCLGEVF_S_PORT_BIT) 101373f7767eSJian Shen tuple_data |= RXH_L4_B_0_1; 101473f7767eSJian Shen if (tuple_sets & HCLGEVF_D_IP_BIT) 101573f7767eSJian Shen tuple_data |= RXH_IP_DST; 101673f7767eSJian Shen if (tuple_sets & HCLGEVF_S_IP_BIT) 101773f7767eSJian Shen tuple_data |= RXH_IP_SRC; 101873f7767eSJian Shen 101973f7767eSJian Shen return tuple_data; 102073f7767eSJian Shen } 102173f7767eSJian Shen 1022d97b3072SJian Shen static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 1023d97b3072SJian Shen struct ethtool_rxnfc *nfc) 1024d97b3072SJian Shen { 1025d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1026d97b3072SJian Shen u8 tuple_sets; 102773f7767eSJian Shen int ret; 1028d97b3072SJian Shen 1029295ba232SGuangbin Huang if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 1030d97b3072SJian Shen return -EOPNOTSUPP; 1031d97b3072SJian Shen 1032d97b3072SJian Shen nfc->data = 0; 1033d97b3072SJian Shen 1034*027733b1SJie Wang ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type, 103573f7767eSJian Shen &tuple_sets); 103673f7767eSJian Shen if (ret || !tuple_sets) 103773f7767eSJian Shen return ret; 1038d97b3072SJian Shen 103973f7767eSJian Shen nfc->data = hclgevf_convert_rss_tuple(tuple_sets); 1040d97b3072SJian Shen 1041d97b3072SJian Shen return 0; 1042d97b3072SJian Shen } 1043d97b3072SJian Shen 1044d97b3072SJian Shen static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 1045*027733b1SJie Wang struct hclge_comm_rss_cfg *rss_cfg) 1046d97b3072SJian Shen { 1047d97b3072SJian Shen struct hclgevf_rss_input_tuple_cmd *req; 10486befad60SJie Wang struct hclge_desc desc; 1049d97b3072SJian Shen int 
ret; 1050d97b3072SJian Shen 1051d97b3072SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 1052d97b3072SJian Shen 1053d97b3072SJian Shen req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 1054d97b3072SJian Shen 1055d97b3072SJian Shen req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 1056d97b3072SJian Shen req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 1057d97b3072SJian Shen req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 1058d97b3072SJian Shen req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 1059d97b3072SJian Shen req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 1060d97b3072SJian Shen req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 1061d97b3072SJian Shen req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 1062d97b3072SJian Shen req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 1063d97b3072SJian Shen 1064d97b3072SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1065d97b3072SJian Shen if (ret) 1066d97b3072SJian Shen dev_err(&hdev->pdev->dev, 1067d97b3072SJian Shen "Configure rss input fail, status = %d\n", ret); 1068d97b3072SJian Shen return ret; 1069d97b3072SJian Shen } 1070d97b3072SJian Shen 1071e2cb1decSSalil Mehta static int hclgevf_get_tc_size(struct hnae3_handle *handle) 1072e2cb1decSSalil Mehta { 1073e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1074*027733b1SJie Wang struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 1075e2cb1decSSalil Mehta 1076e2cb1decSSalil Mehta return rss_cfg->rss_size; 1077e2cb1decSSalil Mehta } 1078e2cb1decSSalil Mehta 1079e2cb1decSSalil Mehta static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 1080b204bc74SPeng Li int vector_id, 1081e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1082e2cb1decSSalil Mehta { 1083e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1084d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 
1085e2cb1decSSalil Mehta struct hnae3_ring_chain_node *node; 1086e2cb1decSSalil Mehta int status; 1087d3410018SYufeng Mo int i = 0; 1088e2cb1decSSalil Mehta 1089d3410018SYufeng Mo memset(&send_msg, 0, sizeof(send_msg)); 1090d3410018SYufeng Mo send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 1091c09ba484SPeng Li HCLGE_MBX_UNMAP_RING_TO_VECTOR; 1092d3410018SYufeng Mo send_msg.vector_id = vector_id; 1093e2cb1decSSalil Mehta 1094e2cb1decSSalil Mehta for (node = ring_chain; node; node = node->next) { 1095d3410018SYufeng Mo send_msg.param[i].ring_type = 1096e4e87715SPeng Li hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 1097d3410018SYufeng Mo 1098d3410018SYufeng Mo send_msg.param[i].tqp_index = node->tqp_index; 1099d3410018SYufeng Mo send_msg.param[i].int_gl_index = 1100d3410018SYufeng Mo hnae3_get_field(node->int_gl_idx, 110179eee410SFuyun Liang HNAE3_RING_GL_IDX_M, 110279eee410SFuyun Liang HNAE3_RING_GL_IDX_S); 110379eee410SFuyun Liang 11045d02a58dSYunsheng Lin i++; 1105d3410018SYufeng Mo if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 1106d3410018SYufeng Mo send_msg.ring_num = i; 1107e2cb1decSSalil Mehta 1108d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 1109d3410018SYufeng Mo NULL, 0); 1110e2cb1decSSalil Mehta if (status) { 1111e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1112e2cb1decSSalil Mehta "Map TQP fail, status is %d.\n", 1113e2cb1decSSalil Mehta status); 1114e2cb1decSSalil Mehta return status; 1115e2cb1decSSalil Mehta } 1116e2cb1decSSalil Mehta i = 0; 1117e2cb1decSSalil Mehta } 1118e2cb1decSSalil Mehta } 1119e2cb1decSSalil Mehta 1120e2cb1decSSalil Mehta return 0; 1121e2cb1decSSalil Mehta } 1122e2cb1decSSalil Mehta 1123e2cb1decSSalil Mehta static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1124e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1125e2cb1decSSalil Mehta { 1126b204bc74SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1127b204bc74SPeng Li int 
vector_id; 1128b204bc74SPeng Li 1129b204bc74SPeng Li vector_id = hclgevf_get_vector_index(hdev, vector); 1130b204bc74SPeng Li if (vector_id < 0) { 1131b204bc74SPeng Li dev_err(&handle->pdev->dev, 1132b204bc74SPeng Li "Get vector index fail. ret =%d\n", vector_id); 1133b204bc74SPeng Li return vector_id; 1134b204bc74SPeng Li } 1135b204bc74SPeng Li 1136b204bc74SPeng Li return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1137e2cb1decSSalil Mehta } 1138e2cb1decSSalil Mehta 1139e2cb1decSSalil Mehta static int hclgevf_unmap_ring_from_vector( 1140e2cb1decSSalil Mehta struct hnae3_handle *handle, 1141e2cb1decSSalil Mehta int vector, 1142e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 1143e2cb1decSSalil Mehta { 1144e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1145e2cb1decSSalil Mehta int ret, vector_id; 1146e2cb1decSSalil Mehta 1147dea846e8SHuazhong Tan if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1148dea846e8SHuazhong Tan return 0; 1149dea846e8SHuazhong Tan 1150e2cb1decSSalil Mehta vector_id = hclgevf_get_vector_index(hdev, vector); 1151e2cb1decSSalil Mehta if (vector_id < 0) { 1152e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 1153e2cb1decSSalil Mehta "Get vector index fail. ret =%d\n", vector_id); 1154e2cb1decSSalil Mehta return vector_id; 1155e2cb1decSSalil Mehta } 1156e2cb1decSSalil Mehta 1157b204bc74SPeng Li ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 11580d3e6631SYunsheng Lin if (ret) 1159e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 1160e2cb1decSSalil Mehta "Unmap ring from vector fail. 
vector=%d, ret =%d\n", 1161e2cb1decSSalil Mehta vector_id, 1162e2cb1decSSalil Mehta ret); 11630d3e6631SYunsheng Lin 1164e2cb1decSSalil Mehta return ret; 1165e2cb1decSSalil Mehta } 1166e2cb1decSSalil Mehta 11670d3e6631SYunsheng Lin static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 11680d3e6631SYunsheng Lin { 11690d3e6631SYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 117003718db9SYunsheng Lin int vector_id; 11710d3e6631SYunsheng Lin 117203718db9SYunsheng Lin vector_id = hclgevf_get_vector_index(hdev, vector); 117303718db9SYunsheng Lin if (vector_id < 0) { 117403718db9SYunsheng Lin dev_err(&handle->pdev->dev, 117503718db9SYunsheng Lin "hclgevf_put_vector get vector index fail. ret =%d\n", 117603718db9SYunsheng Lin vector_id); 117703718db9SYunsheng Lin return vector_id; 117803718db9SYunsheng Lin } 117903718db9SYunsheng Lin 118003718db9SYunsheng Lin hclgevf_free_vector(hdev, vector_id); 1181e2cb1decSSalil Mehta 1182e2cb1decSSalil Mehta return 0; 1183e2cb1decSSalil Mehta } 1184e2cb1decSSalil Mehta 11853b75c3dfSPeng Li static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1186e196ec75SJian Shen bool en_uc_pmc, bool en_mc_pmc, 1187f01f5559SJian Shen bool en_bc_pmc) 1188e2cb1decSSalil Mehta { 11895e7414cdSJian Shen struct hnae3_handle *handle = &hdev->nic; 1190d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1191f01f5559SJian Shen int ret; 1192e2cb1decSSalil Mehta 1193d3410018SYufeng Mo memset(&send_msg, 0, sizeof(send_msg)); 1194d3410018SYufeng Mo send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 1195d3410018SYufeng Mo send_msg.en_bc = en_bc_pmc ? 1 : 0; 1196d3410018SYufeng Mo send_msg.en_uc = en_uc_pmc ? 1 : 0; 1197d3410018SYufeng Mo send_msg.en_mc = en_mc_pmc ? 1 : 0; 11985e7414cdSJian Shen send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC, 11995e7414cdSJian Shen &handle->priv_flags) ? 
1 : 0; 1200e2cb1decSSalil Mehta 1201d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1202f01f5559SJian Shen if (ret) 1203e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1204f01f5559SJian Shen "Set promisc mode fail, status is %d.\n", ret); 1205e2cb1decSSalil Mehta 1206f01f5559SJian Shen return ret; 1207e2cb1decSSalil Mehta } 1208e2cb1decSSalil Mehta 1209e196ec75SJian Shen static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 1210e196ec75SJian Shen bool en_mc_pmc) 1211e2cb1decSSalil Mehta { 1212e196ec75SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1213e196ec75SJian Shen bool en_bc_pmc; 1214e196ec75SJian Shen 1215295ba232SGuangbin Huang en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; 1216e196ec75SJian Shen 1217e196ec75SJian Shen return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 1218e196ec75SJian Shen en_bc_pmc); 1219e2cb1decSSalil Mehta } 1220e2cb1decSSalil Mehta 1221c631c696SJian Shen static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 1222c631c696SJian Shen { 1223c631c696SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1224c631c696SJian Shen 1225c631c696SJian Shen set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 12265e7414cdSJian Shen hclgevf_task_schedule(hdev, 0); 1227c631c696SJian Shen } 1228c631c696SJian Shen 1229c631c696SJian Shen static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 1230c631c696SJian Shen { 1231c631c696SJian Shen struct hnae3_handle *handle = &hdev->nic; 1232c631c696SJian Shen bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 1233c631c696SJian Shen bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 1234c631c696SJian Shen int ret; 1235c631c696SJian Shen 1236c631c696SJian Shen if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 1237c631c696SJian Shen ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 1238c631c696SJian Shen if (!ret) 1239c631c696SJian Shen 
clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1240c631c696SJian Shen } 1241c631c696SJian Shen } 1242c631c696SJian Shen 12438fa86551SYufeng Mo static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, 12448fa86551SYufeng Mo u16 stream_id, bool enable) 1245e2cb1decSSalil Mehta { 1246e2cb1decSSalil Mehta struct hclgevf_cfg_com_tqp_queue_cmd *req; 12476befad60SJie Wang struct hclge_desc desc; 1248e2cb1decSSalil Mehta 1249e2cb1decSSalil Mehta req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1250e2cb1decSSalil Mehta 1251e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1252e2cb1decSSalil Mehta false); 1253e2cb1decSSalil Mehta req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1254e2cb1decSSalil Mehta req->stream_id = cpu_to_le16(stream_id); 1255ebaf1908SWeihang Li if (enable) 1256ebaf1908SWeihang Li req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 1257e2cb1decSSalil Mehta 12588fa86551SYufeng Mo return hclgevf_cmd_send(&hdev->hw, &desc, 1); 12598fa86551SYufeng Mo } 1260e2cb1decSSalil Mehta 12618fa86551SYufeng Mo static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable) 12628fa86551SYufeng Mo { 12638fa86551SYufeng Mo struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 12648fa86551SYufeng Mo int ret; 12658fa86551SYufeng Mo u16 i; 12668fa86551SYufeng Mo 12678fa86551SYufeng Mo for (i = 0; i < handle->kinfo.num_tqps; i++) { 12688fa86551SYufeng Mo ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable); 12698fa86551SYufeng Mo if (ret) 12708fa86551SYufeng Mo return ret; 12718fa86551SYufeng Mo } 12728fa86551SYufeng Mo 12738fa86551SYufeng Mo return 0; 1274e2cb1decSSalil Mehta } 1275e2cb1decSSalil Mehta 1276e2cb1decSSalil Mehta static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1277e2cb1decSSalil Mehta { 1278b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1279e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 1280e2cb1decSSalil Mehta int i; 
1281e2cb1decSSalil Mehta 1282b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 1283b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1284e2cb1decSSalil Mehta memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1285e2cb1decSSalil Mehta } 1286e2cb1decSSalil Mehta } 1287e2cb1decSSalil Mehta 12888e6de441SHuazhong Tan static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 12898e6de441SHuazhong Tan { 1290d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 12918e6de441SHuazhong Tan u8 host_mac[ETH_ALEN]; 12928e6de441SHuazhong Tan int status; 12938e6de441SHuazhong Tan 1294d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 1295d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 1296d3410018SYufeng Mo ETH_ALEN); 12978e6de441SHuazhong Tan if (status) { 12988e6de441SHuazhong Tan dev_err(&hdev->pdev->dev, 12998e6de441SHuazhong Tan "fail to get VF MAC from host %d", status); 13008e6de441SHuazhong Tan return status; 13018e6de441SHuazhong Tan } 13028e6de441SHuazhong Tan 13038e6de441SHuazhong Tan ether_addr_copy(p, host_mac); 13048e6de441SHuazhong Tan 13058e6de441SHuazhong Tan return 0; 13068e6de441SHuazhong Tan } 13078e6de441SHuazhong Tan 1308e2cb1decSSalil Mehta static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1309e2cb1decSSalil Mehta { 1310e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 13118e6de441SHuazhong Tan u8 host_mac_addr[ETH_ALEN]; 1312e2cb1decSSalil Mehta 13138e6de441SHuazhong Tan if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 13148e6de441SHuazhong Tan return; 13158e6de441SHuazhong Tan 13168e6de441SHuazhong Tan hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 13178e6de441SHuazhong Tan if (hdev->has_pf_mac) 13188e6de441SHuazhong Tan ether_addr_copy(p, host_mac_addr); 13198e6de441SHuazhong Tan else 1320e2cb1decSSalil Mehta ether_addr_copy(p, hdev->hw.mac.mac_addr); 1321e2cb1decSSalil Mehta } 
1322e2cb1decSSalil Mehta 132376660757SJakub Kicinski static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p, 132459098055SFuyun Liang bool is_first) 1325e2cb1decSSalil Mehta { 1326e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1327e2cb1decSSalil Mehta u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1328d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1329e2cb1decSSalil Mehta u8 *new_mac_addr = (u8 *)p; 1330e2cb1decSSalil Mehta int status; 1331e2cb1decSSalil Mehta 1332d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 1333ee4bcd3bSJian Shen send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1334d3410018SYufeng Mo ether_addr_copy(send_msg.data, new_mac_addr); 1335ee4bcd3bSJian Shen if (is_first && !hdev->has_pf_mac) 1336ee4bcd3bSJian Shen eth_zero_addr(&send_msg.data[ETH_ALEN]); 1337ee4bcd3bSJian Shen else 1338d3410018SYufeng Mo ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 1339d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1340e2cb1decSSalil Mehta if (!status) 1341e2cb1decSSalil Mehta ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1342e2cb1decSSalil Mehta 1343e2cb1decSSalil Mehta return status; 1344e2cb1decSSalil Mehta } 1345e2cb1decSSalil Mehta 1346ee4bcd3bSJian Shen static struct hclgevf_mac_addr_node * 1347ee4bcd3bSJian Shen hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 1348ee4bcd3bSJian Shen { 1349ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp; 1350ee4bcd3bSJian Shen 1351ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) 1352ee4bcd3bSJian Shen if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 1353ee4bcd3bSJian Shen return mac_node; 1354ee4bcd3bSJian Shen 1355ee4bcd3bSJian Shen return NULL; 1356ee4bcd3bSJian Shen } 1357ee4bcd3bSJian Shen 1358ee4bcd3bSJian Shen static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 1359ee4bcd3bSJian Shen enum 
HCLGEVF_MAC_NODE_STATE state) 1360ee4bcd3bSJian Shen { 1361ee4bcd3bSJian Shen switch (state) { 1362ee4bcd3bSJian Shen /* from set_rx_mode or tmp_add_list */ 1363ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_ADD: 1364ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_DEL) 1365ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_ACTIVE; 1366ee4bcd3bSJian Shen break; 1367ee4bcd3bSJian Shen /* only from set_rx_mode */ 1368ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_DEL: 1369ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1370ee4bcd3bSJian Shen list_del(&mac_node->node); 1371ee4bcd3bSJian Shen kfree(mac_node); 1372ee4bcd3bSJian Shen } else { 1373ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_TO_DEL; 1374ee4bcd3bSJian Shen } 1375ee4bcd3bSJian Shen break; 1376ee4bcd3bSJian Shen /* only from tmp_add_list, the mac_node->state won't be 1377ee4bcd3bSJian Shen * HCLGEVF_MAC_ACTIVE 1378ee4bcd3bSJian Shen */ 1379ee4bcd3bSJian Shen case HCLGEVF_MAC_ACTIVE: 1380ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1381ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_ACTIVE; 1382ee4bcd3bSJian Shen break; 1383ee4bcd3bSJian Shen } 1384ee4bcd3bSJian Shen } 1385ee4bcd3bSJian Shen 1386ee4bcd3bSJian Shen static int hclgevf_update_mac_list(struct hnae3_handle *handle, 1387ee4bcd3bSJian Shen enum HCLGEVF_MAC_NODE_STATE state, 1388ee4bcd3bSJian Shen enum HCLGEVF_MAC_ADDR_TYPE mac_type, 1389e2cb1decSSalil Mehta const unsigned char *addr) 1390e2cb1decSSalil Mehta { 1391e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1392ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node; 1393ee4bcd3bSJian Shen struct list_head *list; 1394e2cb1decSSalil Mehta 1395ee4bcd3bSJian Shen list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 
1396ee4bcd3bSJian Shen &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1397ee4bcd3bSJian Shen 1398ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 1399ee4bcd3bSJian Shen 1400ee4bcd3bSJian Shen /* if the mac addr is already in the mac list, no need to add a new 1401ee4bcd3bSJian Shen * one into it, just check the mac addr state, convert it to a new 1402ee4bcd3bSJian Shen * new state, or just remove it, or do nothing. 1403ee4bcd3bSJian Shen */ 1404ee4bcd3bSJian Shen mac_node = hclgevf_find_mac_node(list, addr); 1405ee4bcd3bSJian Shen if (mac_node) { 1406ee4bcd3bSJian Shen hclgevf_update_mac_node(mac_node, state); 1407ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1408ee4bcd3bSJian Shen return 0; 1409ee4bcd3bSJian Shen } 1410ee4bcd3bSJian Shen /* if this address is never added, unnecessary to delete */ 1411ee4bcd3bSJian Shen if (state == HCLGEVF_MAC_TO_DEL) { 1412ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1413ee4bcd3bSJian Shen return -ENOENT; 1414ee4bcd3bSJian Shen } 1415ee4bcd3bSJian Shen 1416ee4bcd3bSJian Shen mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 1417ee4bcd3bSJian Shen if (!mac_node) { 1418ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1419ee4bcd3bSJian Shen return -ENOMEM; 1420ee4bcd3bSJian Shen } 1421ee4bcd3bSJian Shen 1422ee4bcd3bSJian Shen mac_node->state = state; 1423ee4bcd3bSJian Shen ether_addr_copy(mac_node->mac_addr, addr); 1424ee4bcd3bSJian Shen list_add_tail(&mac_node->node, list); 1425ee4bcd3bSJian Shen 1426ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1427ee4bcd3bSJian Shen return 0; 1428ee4bcd3bSJian Shen } 1429ee4bcd3bSJian Shen 1430ee4bcd3bSJian Shen static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1431ee4bcd3bSJian Shen const unsigned char *addr) 1432ee4bcd3bSJian Shen { 1433ee4bcd3bSJian Shen return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1434ee4bcd3bSJian Shen HCLGEVF_MAC_ADDR_UC, addr); 
1435e2cb1decSSalil Mehta } 1436e2cb1decSSalil Mehta 1437e2cb1decSSalil Mehta static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1438e2cb1decSSalil Mehta const unsigned char *addr) 1439e2cb1decSSalil Mehta { 1440ee4bcd3bSJian Shen return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1441ee4bcd3bSJian Shen HCLGEVF_MAC_ADDR_UC, addr); 1442e2cb1decSSalil Mehta } 1443e2cb1decSSalil Mehta 1444e2cb1decSSalil Mehta static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1445e2cb1decSSalil Mehta const unsigned char *addr) 1446e2cb1decSSalil Mehta { 1447ee4bcd3bSJian Shen return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1448ee4bcd3bSJian Shen HCLGEVF_MAC_ADDR_MC, addr); 1449e2cb1decSSalil Mehta } 1450e2cb1decSSalil Mehta 1451e2cb1decSSalil Mehta static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1452e2cb1decSSalil Mehta const unsigned char *addr) 1453e2cb1decSSalil Mehta { 1454ee4bcd3bSJian Shen return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1455ee4bcd3bSJian Shen HCLGEVF_MAC_ADDR_MC, addr); 1456ee4bcd3bSJian Shen } 1457e2cb1decSSalil Mehta 1458ee4bcd3bSJian Shen static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, 1459ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, 1460ee4bcd3bSJian Shen enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1461ee4bcd3bSJian Shen { 1462ee4bcd3bSJian Shen struct hclge_vf_to_pf_msg send_msg; 1463ee4bcd3bSJian Shen u8 code, subcode; 1464ee4bcd3bSJian Shen 1465ee4bcd3bSJian Shen if (mac_type == HCLGEVF_MAC_ADDR_UC) { 1466ee4bcd3bSJian Shen code = HCLGE_MBX_SET_UNICAST; 1467ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1468ee4bcd3bSJian Shen subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; 1469ee4bcd3bSJian Shen else 1470ee4bcd3bSJian Shen subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; 1471ee4bcd3bSJian Shen } else { 1472ee4bcd3bSJian Shen code = HCLGE_MBX_SET_MULTICAST; 1473ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1474ee4bcd3bSJian Shen subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; 
1475ee4bcd3bSJian Shen else 1476ee4bcd3bSJian Shen subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; 1477ee4bcd3bSJian Shen } 1478ee4bcd3bSJian Shen 1479ee4bcd3bSJian Shen hclgevf_build_send_msg(&send_msg, code, subcode); 1480ee4bcd3bSJian Shen ether_addr_copy(send_msg.data, mac_node->mac_addr); 1481d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1482e2cb1decSSalil Mehta } 1483e2cb1decSSalil Mehta 1484ee4bcd3bSJian Shen static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, 1485ee4bcd3bSJian Shen struct list_head *list, 1486ee4bcd3bSJian Shen enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1487ee4bcd3bSJian Shen { 14884f331fdaSYufeng Mo char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 1489ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp; 1490ee4bcd3bSJian Shen int ret; 1491ee4bcd3bSJian Shen 1492ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) { 1493ee4bcd3bSJian Shen ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); 1494ee4bcd3bSJian Shen if (ret) { 14954f331fdaSYufeng Mo hnae3_format_mac_addr(format_mac_addr, 14964f331fdaSYufeng Mo mac_node->mac_addr); 1497ee4bcd3bSJian Shen dev_err(&hdev->pdev->dev, 14984f331fdaSYufeng Mo "failed to configure mac %s, state = %d, ret = %d\n", 14994f331fdaSYufeng Mo format_mac_addr, mac_node->state, ret); 1500ee4bcd3bSJian Shen return; 1501ee4bcd3bSJian Shen } 1502ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1503ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_ACTIVE; 1504ee4bcd3bSJian Shen } else { 1505ee4bcd3bSJian Shen list_del(&mac_node->node); 1506ee4bcd3bSJian Shen kfree(mac_node); 1507ee4bcd3bSJian Shen } 1508ee4bcd3bSJian Shen } 1509ee4bcd3bSJian Shen } 1510ee4bcd3bSJian Shen 1511ee4bcd3bSJian Shen static void hclgevf_sync_from_add_list(struct list_head *add_list, 1512ee4bcd3bSJian Shen struct list_head *mac_list) 1513ee4bcd3bSJian Shen { 1514ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1515ee4bcd3bSJian Shen 
1516ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, add_list, node) { 1517ee4bcd3bSJian Shen /* if the mac address from tmp_add_list is not in the 1518ee4bcd3bSJian Shen * uc/mc_mac_list, it means have received a TO_DEL request 1519ee4bcd3bSJian Shen * during the time window of sending mac config request to PF 1520ee4bcd3bSJian Shen * If mac_node state is ACTIVE, then change its state to TO_DEL, 1521ee4bcd3bSJian Shen * then it will be removed at next time. If is TO_ADD, it means 1522ee4bcd3bSJian Shen * send TO_ADD request failed, so just remove the mac node. 1523ee4bcd3bSJian Shen */ 1524ee4bcd3bSJian Shen new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1525ee4bcd3bSJian Shen if (new_node) { 1526ee4bcd3bSJian Shen hclgevf_update_mac_node(new_node, mac_node->state); 1527ee4bcd3bSJian Shen list_del(&mac_node->node); 1528ee4bcd3bSJian Shen kfree(mac_node); 1529ee4bcd3bSJian Shen } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { 1530ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_TO_DEL; 153149768ce9SBaokun Li list_move_tail(&mac_node->node, mac_list); 1532ee4bcd3bSJian Shen } else { 1533ee4bcd3bSJian Shen list_del(&mac_node->node); 1534ee4bcd3bSJian Shen kfree(mac_node); 1535ee4bcd3bSJian Shen } 1536ee4bcd3bSJian Shen } 1537ee4bcd3bSJian Shen } 1538ee4bcd3bSJian Shen 1539ee4bcd3bSJian Shen static void hclgevf_sync_from_del_list(struct list_head *del_list, 1540ee4bcd3bSJian Shen struct list_head *mac_list) 1541ee4bcd3bSJian Shen { 1542ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1543ee4bcd3bSJian Shen 1544ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, del_list, node) { 1545ee4bcd3bSJian Shen new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1546ee4bcd3bSJian Shen if (new_node) { 1547ee4bcd3bSJian Shen /* If the mac addr is exist in the mac list, it means 1548ee4bcd3bSJian Shen * received a new request TO_ADD during the time window 1549ee4bcd3bSJian Shen * of sending mac addr 
configurrequest to PF, so just 1550ee4bcd3bSJian Shen * change the mac state to ACTIVE. 1551ee4bcd3bSJian Shen */ 1552ee4bcd3bSJian Shen new_node->state = HCLGEVF_MAC_ACTIVE; 1553ee4bcd3bSJian Shen list_del(&mac_node->node); 1554ee4bcd3bSJian Shen kfree(mac_node); 1555ee4bcd3bSJian Shen } else { 155649768ce9SBaokun Li list_move_tail(&mac_node->node, mac_list); 1557ee4bcd3bSJian Shen } 1558ee4bcd3bSJian Shen } 1559ee4bcd3bSJian Shen } 1560ee4bcd3bSJian Shen 1561ee4bcd3bSJian Shen static void hclgevf_clear_list(struct list_head *list) 1562ee4bcd3bSJian Shen { 1563ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp; 1564ee4bcd3bSJian Shen 1565ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) { 1566ee4bcd3bSJian Shen list_del(&mac_node->node); 1567ee4bcd3bSJian Shen kfree(mac_node); 1568ee4bcd3bSJian Shen } 1569ee4bcd3bSJian Shen } 1570ee4bcd3bSJian Shen 1571ee4bcd3bSJian Shen static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, 1572ee4bcd3bSJian Shen enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1573ee4bcd3bSJian Shen { 1574ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1575ee4bcd3bSJian Shen struct list_head tmp_add_list, tmp_del_list; 1576ee4bcd3bSJian Shen struct list_head *list; 1577ee4bcd3bSJian Shen 1578ee4bcd3bSJian Shen INIT_LIST_HEAD(&tmp_add_list); 1579ee4bcd3bSJian Shen INIT_LIST_HEAD(&tmp_del_list); 1580ee4bcd3bSJian Shen 1581ee4bcd3bSJian Shen /* move the mac addr to the tmp_add_list and tmp_del_list, then 1582ee4bcd3bSJian Shen * we can add/delete these mac addr outside the spin lock 1583ee4bcd3bSJian Shen */ 1584ee4bcd3bSJian Shen list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 
1585ee4bcd3bSJian Shen &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1586ee4bcd3bSJian Shen 1587ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 1588ee4bcd3bSJian Shen 1589ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) { 1590ee4bcd3bSJian Shen switch (mac_node->state) { 1591ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_DEL: 159249768ce9SBaokun Li list_move_tail(&mac_node->node, &tmp_del_list); 1593ee4bcd3bSJian Shen break; 1594ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_ADD: 1595ee4bcd3bSJian Shen new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 1596ee4bcd3bSJian Shen if (!new_node) 1597ee4bcd3bSJian Shen goto stop_traverse; 1598ee4bcd3bSJian Shen 1599ee4bcd3bSJian Shen ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 1600ee4bcd3bSJian Shen new_node->state = mac_node->state; 1601ee4bcd3bSJian Shen list_add_tail(&new_node->node, &tmp_add_list); 1602ee4bcd3bSJian Shen break; 1603ee4bcd3bSJian Shen default: 1604ee4bcd3bSJian Shen break; 1605ee4bcd3bSJian Shen } 1606ee4bcd3bSJian Shen } 1607ee4bcd3bSJian Shen 1608ee4bcd3bSJian Shen stop_traverse: 1609ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1610ee4bcd3bSJian Shen 1611ee4bcd3bSJian Shen /* delete first, in order to get max mac table space for adding */ 1612ee4bcd3bSJian Shen hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); 1613ee4bcd3bSJian Shen hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); 1614ee4bcd3bSJian Shen 1615ee4bcd3bSJian Shen /* if some mac addresses were added/deleted fail, move back to the 1616ee4bcd3bSJian Shen * mac_list, and retry at next time. 
1617ee4bcd3bSJian Shen */ 1618ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 1619ee4bcd3bSJian Shen 1620ee4bcd3bSJian Shen hclgevf_sync_from_del_list(&tmp_del_list, list); 1621ee4bcd3bSJian Shen hclgevf_sync_from_add_list(&tmp_add_list, list); 1622ee4bcd3bSJian Shen 1623ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1624ee4bcd3bSJian Shen } 1625ee4bcd3bSJian Shen 1626ee4bcd3bSJian Shen static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) 1627ee4bcd3bSJian Shen { 1628ee4bcd3bSJian Shen hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); 1629ee4bcd3bSJian Shen hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); 1630ee4bcd3bSJian Shen } 1631ee4bcd3bSJian Shen 1632ee4bcd3bSJian Shen static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) 1633ee4bcd3bSJian Shen { 1634ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 1635ee4bcd3bSJian Shen 1636ee4bcd3bSJian Shen hclgevf_clear_list(&hdev->mac_table.uc_mac_list); 1637ee4bcd3bSJian Shen hclgevf_clear_list(&hdev->mac_table.mc_mac_list); 1638ee4bcd3bSJian Shen 1639ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1640ee4bcd3bSJian Shen } 1641ee4bcd3bSJian Shen 1642fa6a262aSJian Shen static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 1643fa6a262aSJian Shen { 1644fa6a262aSJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1645fa6a262aSJian Shen struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 1646fa6a262aSJian Shen struct hclge_vf_to_pf_msg send_msg; 1647fa6a262aSJian Shen 1648fa6a262aSJian Shen if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) 1649fa6a262aSJian Shen return -EOPNOTSUPP; 1650fa6a262aSJian Shen 1651fa6a262aSJian Shen hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1652fa6a262aSJian Shen HCLGE_MBX_ENABLE_VLAN_FILTER); 1653fa6a262aSJian Shen send_msg.data[0] = enable ? 
1 : 0; 1654fa6a262aSJian Shen 1655fa6a262aSJian Shen return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1656fa6a262aSJian Shen } 1657fa6a262aSJian Shen 1658e2cb1decSSalil Mehta static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1659e2cb1decSSalil Mehta __be16 proto, u16 vlan_id, 1660e2cb1decSSalil Mehta bool is_kill) 1661e2cb1decSSalil Mehta { 1662d3410018SYufeng Mo #define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 1663d3410018SYufeng Mo #define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 1664d3410018SYufeng Mo #define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 1665d3410018SYufeng Mo 1666e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1667d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1668fe4144d4SJian Shen int ret; 1669e2cb1decSSalil Mehta 1670b37ce587SYufeng Mo if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1671e2cb1decSSalil Mehta return -EINVAL; 1672e2cb1decSSalil Mehta 1673e2cb1decSSalil Mehta if (proto != htons(ETH_P_8021Q)) 1674e2cb1decSSalil Mehta return -EPROTONOSUPPORT; 1675e2cb1decSSalil Mehta 1676b7b5d25bSGuojia Liao /* When device is resetting or reset failed, firmware is unable to 1677b7b5d25bSGuojia Liao * handle mailbox. Just record the vlan id, and remove it after 1678fe4144d4SJian Shen * reset finished. 
1679fe4144d4SJian Shen */ 1680b7b5d25bSGuojia Liao if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 1681b7b5d25bSGuojia Liao test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { 1682fe4144d4SJian Shen set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1683fe4144d4SJian Shen return -EBUSY; 1684fe4144d4SJian Shen } 1685fe4144d4SJian Shen 1686d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1687d3410018SYufeng Mo HCLGE_MBX_VLAN_FILTER); 1688d3410018SYufeng Mo send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; 1689d3410018SYufeng Mo memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, 1690d3410018SYufeng Mo sizeof(vlan_id)); 1691d3410018SYufeng Mo memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, 1692d3410018SYufeng Mo sizeof(proto)); 169346ee7350SGuojia Liao /* when remove hw vlan filter failed, record the vlan id, 1694fe4144d4SJian Shen * and try to remove it from hw later, to be consistence 1695fe4144d4SJian Shen * with stack. 
1696fe4144d4SJian Shen */ 1697d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1698fe4144d4SJian Shen if (is_kill && ret) 1699fe4144d4SJian Shen set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1700fe4144d4SJian Shen 1701fe4144d4SJian Shen return ret; 1702fe4144d4SJian Shen } 1703fe4144d4SJian Shen 1704fe4144d4SJian Shen static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1705fe4144d4SJian Shen { 1706fe4144d4SJian Shen #define HCLGEVF_MAX_SYNC_COUNT 60 1707fe4144d4SJian Shen struct hnae3_handle *handle = &hdev->nic; 1708fe4144d4SJian Shen int ret, sync_cnt = 0; 1709fe4144d4SJian Shen u16 vlan_id; 1710fe4144d4SJian Shen 1711fe4144d4SJian Shen vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1712fe4144d4SJian Shen while (vlan_id != VLAN_N_VID) { 1713fe4144d4SJian Shen ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1714fe4144d4SJian Shen vlan_id, true); 1715fe4144d4SJian Shen if (ret) 1716fe4144d4SJian Shen return; 1717fe4144d4SJian Shen 1718fe4144d4SJian Shen clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1719fe4144d4SJian Shen sync_cnt++; 1720fe4144d4SJian Shen if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1721fe4144d4SJian Shen return; 1722fe4144d4SJian Shen 1723fe4144d4SJian Shen vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1724fe4144d4SJian Shen } 1725e2cb1decSSalil Mehta } 1726e2cb1decSSalil Mehta 1727b2641e2aSYunsheng Lin static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1728b2641e2aSYunsheng Lin { 1729b2641e2aSYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1730d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1731b2641e2aSYunsheng Lin 1732d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1733d3410018SYufeng Mo HCLGE_MBX_VLAN_RX_OFF_CFG); 1734d3410018SYufeng Mo send_msg.data[0] = enable ? 
1 : 0; 1735d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1736b2641e2aSYunsheng Lin } 1737b2641e2aSYunsheng Lin 17388fa86551SYufeng Mo static int hclgevf_reset_tqp(struct hnae3_handle *handle) 1739e2cb1decSSalil Mehta { 17408fa86551SYufeng Mo #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U 1741e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1742d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 17438fa86551SYufeng Mo u8 return_status = 0; 17441a426f8bSPeng Li int ret; 17458fa86551SYufeng Mo u16 i; 1746e2cb1decSSalil Mehta 17471a426f8bSPeng Li /* disable vf queue before send queue reset msg to PF */ 17488fa86551SYufeng Mo ret = hclgevf_tqp_enable(handle, false); 17498fa86551SYufeng Mo if (ret) { 17508fa86551SYufeng Mo dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n", 17518fa86551SYufeng Mo ret); 17527fa6be4fSHuazhong Tan return ret; 17538fa86551SYufeng Mo } 17541a426f8bSPeng Li 1755d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 17568fa86551SYufeng Mo 17578fa86551SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status, 17588fa86551SYufeng Mo sizeof(return_status)); 17598fa86551SYufeng Mo if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE) 17608fa86551SYufeng Mo return ret; 17618fa86551SYufeng Mo 17628fa86551SYufeng Mo for (i = 1; i < handle->kinfo.num_tqps; i++) { 17638fa86551SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 17648fa86551SYufeng Mo memcpy(send_msg.data, &i, sizeof(i)); 17658fa86551SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 17668fa86551SYufeng Mo if (ret) 17678fa86551SYufeng Mo return ret; 17688fa86551SYufeng Mo } 17698fa86551SYufeng Mo 17708fa86551SYufeng Mo return 0; 1771e2cb1decSSalil Mehta } 1772e2cb1decSSalil Mehta 1773818f1675SYunsheng Lin static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1774818f1675SYunsheng Lin { 1775818f1675SYunsheng Lin struct 
hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1776d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1777818f1675SYunsheng Lin 1778d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); 1779d3410018SYufeng Mo memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); 1780d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1781818f1675SYunsheng Lin } 1782818f1675SYunsheng Lin 17836988eb2aSSalil Mehta static int hclgevf_notify_client(struct hclgevf_dev *hdev, 17846988eb2aSSalil Mehta enum hnae3_reset_notify_type type) 17856988eb2aSSalil Mehta { 17866988eb2aSSalil Mehta struct hnae3_client *client = hdev->nic_client; 17876988eb2aSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 17886a5f6fa3SHuazhong Tan int ret; 17896988eb2aSSalil Mehta 179025d1817cSHuazhong Tan if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 179125d1817cSHuazhong Tan !client) 179225d1817cSHuazhong Tan return 0; 179325d1817cSHuazhong Tan 17946988eb2aSSalil Mehta if (!client->ops->reset_notify) 17956988eb2aSSalil Mehta return -EOPNOTSUPP; 17966988eb2aSSalil Mehta 17976a5f6fa3SHuazhong Tan ret = client->ops->reset_notify(handle, type); 17986a5f6fa3SHuazhong Tan if (ret) 17996a5f6fa3SHuazhong Tan dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 18006a5f6fa3SHuazhong Tan type, ret); 18016a5f6fa3SHuazhong Tan 18026a5f6fa3SHuazhong Tan return ret; 18036988eb2aSSalil Mehta } 18046988eb2aSSalil Mehta 1805fe735c84SHuazhong Tan static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, 1806fe735c84SHuazhong Tan enum hnae3_reset_notify_type type) 1807fe735c84SHuazhong Tan { 1808fe735c84SHuazhong Tan struct hnae3_client *client = hdev->roce_client; 1809fe735c84SHuazhong Tan struct hnae3_handle *handle = &hdev->roce; 1810fe735c84SHuazhong Tan int ret; 1811fe735c84SHuazhong Tan 1812fe735c84SHuazhong Tan if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) 1813fe735c84SHuazhong Tan return 0; 1814fe735c84SHuazhong 
Tan 1815fe735c84SHuazhong Tan if (!client->ops->reset_notify) 1816fe735c84SHuazhong Tan return -EOPNOTSUPP; 1817fe735c84SHuazhong Tan 1818fe735c84SHuazhong Tan ret = client->ops->reset_notify(handle, type); 1819fe735c84SHuazhong Tan if (ret) 1820fe735c84SHuazhong Tan dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 1821fe735c84SHuazhong Tan type, ret); 1822fe735c84SHuazhong Tan return ret; 1823fe735c84SHuazhong Tan } 1824fe735c84SHuazhong Tan 18256988eb2aSSalil Mehta static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 18266988eb2aSSalil Mehta { 1827aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_US 20000 1828aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_CNT 2000 1829aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1830aa5c4f17SHuazhong Tan (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1831aa5c4f17SHuazhong Tan 1832aa5c4f17SHuazhong Tan u32 val; 1833aa5c4f17SHuazhong Tan int ret; 18346988eb2aSSalil Mehta 1835f28368bbSHuazhong Tan if (hdev->reset_type == HNAE3_VF_RESET) 1836076bb537SJie Wang ret = readl_poll_timeout(hdev->hw.hw.io_base + 183772e2fb07SHuazhong Tan HCLGEVF_VF_RST_ING, val, 183872e2fb07SHuazhong Tan !(val & HCLGEVF_VF_RST_ING_BIT), 183972e2fb07SHuazhong Tan HCLGEVF_RESET_WAIT_US, 184072e2fb07SHuazhong Tan HCLGEVF_RESET_WAIT_TIMEOUT_US); 184172e2fb07SHuazhong Tan else 1842076bb537SJie Wang ret = readl_poll_timeout(hdev->hw.hw.io_base + 184372e2fb07SHuazhong Tan HCLGEVF_RST_ING, val, 1844aa5c4f17SHuazhong Tan !(val & HCLGEVF_RST_ING_BITS), 1845aa5c4f17SHuazhong Tan HCLGEVF_RESET_WAIT_US, 1846aa5c4f17SHuazhong Tan HCLGEVF_RESET_WAIT_TIMEOUT_US); 18476988eb2aSSalil Mehta 18486988eb2aSSalil Mehta /* hardware completion status should be available by this time */ 1849aa5c4f17SHuazhong Tan if (ret) { 1850aa5c4f17SHuazhong Tan dev_err(&hdev->pdev->dev, 18518912fd6aSColin Ian King "couldn't get reset done status from h/w, timeout!\n"); 1852aa5c4f17SHuazhong Tan return ret; 18536988eb2aSSalil Mehta } 18546988eb2aSSalil Mehta 
18556988eb2aSSalil Mehta /* we will wait a bit more to let reset of the stack to complete. This 18566988eb2aSSalil Mehta * might happen in case reset assertion was made by PF. Yes, this also 18576988eb2aSSalil Mehta * means we might end up waiting bit more even for VF reset. 18586988eb2aSSalil Mehta */ 18596988eb2aSSalil Mehta msleep(5000); 18606988eb2aSSalil Mehta 18616988eb2aSSalil Mehta return 0; 18626988eb2aSSalil Mehta } 18636988eb2aSSalil Mehta 18646b428b4fSHuazhong Tan static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 18656b428b4fSHuazhong Tan { 18666b428b4fSHuazhong Tan u32 reg_val; 18676b428b4fSHuazhong Tan 1868cb413bfaSJie Wang reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); 18696b428b4fSHuazhong Tan if (enable) 18706b428b4fSHuazhong Tan reg_val |= HCLGEVF_NIC_SW_RST_RDY; 18716b428b4fSHuazhong Tan else 18726b428b4fSHuazhong Tan reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 18736b428b4fSHuazhong Tan 1874cb413bfaSJie Wang hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 18756b428b4fSHuazhong Tan reg_val); 18766b428b4fSHuazhong Tan } 18776b428b4fSHuazhong Tan 18786988eb2aSSalil Mehta static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 18796988eb2aSSalil Mehta { 18807a01c897SSalil Mehta int ret; 18817a01c897SSalil Mehta 18826988eb2aSSalil Mehta /* uninitialize the nic client */ 18836a5f6fa3SHuazhong Tan ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 18846a5f6fa3SHuazhong Tan if (ret) 18856a5f6fa3SHuazhong Tan return ret; 18866988eb2aSSalil Mehta 18877a01c897SSalil Mehta /* re-initialize the hclge device */ 18889c6f7085SHuazhong Tan ret = hclgevf_reset_hdev(hdev); 18897a01c897SSalil Mehta if (ret) { 18907a01c897SSalil Mehta dev_err(&hdev->pdev->dev, 18917a01c897SSalil Mehta "hclge device re-init failed, VF is disabled!\n"); 18927a01c897SSalil Mehta return ret; 18937a01c897SSalil Mehta } 18946988eb2aSSalil Mehta 18956988eb2aSSalil Mehta /* bring up the nic client again */ 18966a5f6fa3SHuazhong Tan ret 
= hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 18976a5f6fa3SHuazhong Tan if (ret) 18986a5f6fa3SHuazhong Tan return ret; 18996988eb2aSSalil Mehta 19006b428b4fSHuazhong Tan /* clear handshake status with IMP */ 19016b428b4fSHuazhong Tan hclgevf_reset_handshake(hdev, false); 19026b428b4fSHuazhong Tan 19031cc9bc6eSHuazhong Tan /* bring up the nic to enable TX/RX again */ 19041cc9bc6eSHuazhong Tan return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 19056988eb2aSSalil Mehta } 19066988eb2aSSalil Mehta 1907dea846e8SHuazhong Tan static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1908dea846e8SHuazhong Tan { 1909ada13ee3SHuazhong Tan #define HCLGEVF_RESET_SYNC_TIME 100 1910ada13ee3SHuazhong Tan 1911f28368bbSHuazhong Tan if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1912d41884eeSHuazhong Tan struct hclge_vf_to_pf_msg send_msg; 1913d41884eeSHuazhong Tan int ret; 1914d41884eeSHuazhong Tan 1915d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); 1916d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1917cddd5648SHuazhong Tan if (ret) { 1918cddd5648SHuazhong Tan dev_err(&hdev->pdev->dev, 1919cddd5648SHuazhong Tan "failed to assert VF reset, ret = %d\n", ret); 1920cddd5648SHuazhong Tan return ret; 1921cddd5648SHuazhong Tan } 1922c88a6e7dSHuazhong Tan hdev->rst_stats.vf_func_rst_cnt++; 1923dea846e8SHuazhong Tan } 1924dea846e8SHuazhong Tan 1925076bb537SJie Wang set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 1926ada13ee3SHuazhong Tan /* inform hardware that preparatory work is done */ 1927ada13ee3SHuazhong Tan msleep(HCLGEVF_RESET_SYNC_TIME); 19286b428b4fSHuazhong Tan hclgevf_reset_handshake(hdev, true); 1929d41884eeSHuazhong Tan dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", 1930d41884eeSHuazhong Tan hdev->reset_type); 1931dea846e8SHuazhong Tan 1932d41884eeSHuazhong Tan return 0; 1933dea846e8SHuazhong Tan } 1934dea846e8SHuazhong Tan 19353d77d0cbSHuazhong Tan static void 
hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	/* dump reset statistics and related hardware status for debugging */
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

/* hclgevf_reset_err_handle - handle a failed VF reset attempt.
 * Recovers the IMP handshake, bumps the failure counter and either
 * re-schedules the same reset level (until HCLGEVF_RESET_MAX_FAIL_CNT
 * failures) or marks the device reset-failed and dumps reset state.
 */
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fail */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	/* retry the same reset level until the failure counter saturates */
	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

/* hclgevf_reset_prepare - quiesce the VF before a reset.
 * Notifies the RoCE client down, brings the nic client down under
 * rtnl_lock, then performs the pre-reset wait via
 * hclgevf_reset_prepare_wait(). Returns 0 or a negative errno.
 */
static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.rst_cnt++;

	/* perform reset of the stack & ae device for a client */
	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

/* hclgevf_reset_rebuild - bring the VF back up after hardware reset.
 * Re-initializes the nic stack (under rtnl_lock) and the RoCE client,
 * then clears the reset-failure bookkeeping on success.
 */
static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;
	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret &&
	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

/* hclgevf_reset - full VF reset flow: prepare, wait for the hardware to
 * report reset completion, then rebuild. Any failure falls through to
 * the error handler, which may re-schedule the reset.
 */
static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}

/* hclgevf_get_reset_level - pick the highest-priority reset level pending
 * in @addr and clear the bit chosen plus any lower-priority bits that the
 * chosen level supersedes. Returns HNAE3_NONE_RESET if nothing is pending.
 */
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

/* hclgevf_reset_event - hnae3 hook: the VF enet layer requests a reset.
 * Chooses the reset level (default request if any, else VF function
 * reset) and schedules the reset service task.
 */
static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

/* hnae3 hook: record a default reset level to be used by a later
 * reset event.
 */
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

/* write the enable/disable doorbell of the misc (vector 0) interrupt */
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

/* hclgevf_reset_prepare_general - hnae3 hook: quiesce the VF ahead of a
 * reset driven from outside (e.g. FLR). Retries the prepare step up to
 * HCLGEVF_RESET_RETRY_CNT times while holding the reset semaphore, then
 * masks the misc vector until hclgevf_reset_done() runs.
 */
static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
#define HCLGEVF_RESET_RETRY_WAIT_MS	500
#define HCLGEVF_RESET_RETRY_CNT		5

	struct hclgevf_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

	while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
		down(&hdev->reset_sem);
		set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
		hdev->reset_type = rst_type;
		ret = hclgevf_reset_prepare(hdev);
		/* on success with nothing newly pending, keep the reset
		 * semaphore held for hclgevf_reset_done()
		 */
		if (!ret && !hdev->reset_pending)
			break;

		dev_err(&hdev->pdev->dev,
			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
			ret, hdev->reset_pending, retry_cnt);
		clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
		up(&hdev->reset_sem);
		msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
	}

	/* disable misc vector before reset done */
	hclgevf_enable_vector(&hdev->misc_vector, false);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		hdev->rst_stats.flr_rst_cnt++;
}

/* hclgevf_reset_done - hnae3 hook: counterpart of
 * hclgevf_reset_prepare_general(). Re-enables the misc vector, rebuilds
 * the VF and releases the reset semaphore taken during prepare.
 */
static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	hclgevf_enable_vector(&hdev->misc_vector, true);

	ret = hclgevf_reset_rebuild(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
			 ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

/* return the firmware version cached in the device structure */
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

/* record the IRQ number and doorbell address of the misc (vector 0)
 * interrupt and account for it in the MSI bookkeeping.
 */
static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

/* schedule the service task to run the reset handling immediately,
 * unless the device is being removed or not yet initialized
 */
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

/* schedule the service task to run the mailbox handling immediately */
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

/* (re)schedule the periodic service task after @delay jiffies, unless
 * the device is being removed or is in the reset-failed state
 */
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}

/* hclgevf_reset_service_task - service-task portion of reset handling.
 * Runs under the reset semaphore with RST_HANDLING set; services either
 * a PF-driven pending reset or a locally requested reset.
 */
static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending);
		if (hdev->reset_type != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect above cases than to
		 * react to the response of PF for this reset request. PF will
		 * ack the 1b and 2. cases but we will not get any intimation
		 * about 1a from PF as cmdq would be in unreliable state i.e.
		 * mailbox communication between PF and VF would be broken.
		 *
		 * if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

/* service-task portion of mailbox handling: drain asynchronous mailbox
 * messages from the PF
 */
static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
	if
	    (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		return;

	/* MBX_HANDLING acts as a simple re-entrancy guard */
	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

/* send a keep-alive mailbox message to the PF; skipped while the
 * command queue is disabled (e.g. during reset)
 */
static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
		return;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

/* hclgevf_periodic_service_task - periodic housekeeping: keep-alive,
 * TQP statistics, link status/mode and VLAN/MAC/promisc table sync.
 * Reschedules itself via hclgevf_task_schedule(), throttled to roughly
 * one full pass per second.
 */
static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);
	struct hnae3_handle *handle = &hdev->nic;

	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		return;

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
		hclgevf_keep_alive(hdev);

	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
		hclgevf_tqps_update_stats(handle);

	/* VF does not need to request link status when this bit is set, because
	 * PF will push its link status to VFs when link status changed.
	 */
	if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
		hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hclgevf_sync_mac_table(hdev);

	hclgevf_sync_promisc_mode(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclgevf_task_schedule(hdev, delta);
}

/* single workqueue entry point that multiplexes the reset, mailbox and
 * periodic sub-tasks
 */
static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
						service_task.work);

	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
	hclgevf_periodic_service_task(hdev);

	/* Handle reset and mbx again in case periodical task delays the
	 * handling by calling hclgevf_task_schedule() in
	 * hclgevf_periodic_service_task()
	 */
	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
}

/* acknowledge vector 0 events by writing @regclr to the CMDQ source reg */
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
}

/* hclgevf_check_evt_cause - classify a vector 0 interrupt as reset,
 * mailbox or other, and compute the value to write back for clearing it
 * via @clearval. For reset events it also marks the reset as pending,
 * disables the command queue and latches the VF reset-ing status bit.
 */
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 val, cmdq_stat_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
		hdev->rst_stats.vf_rst_cnt++;
		/* set up VF hardware reset status, its PF will clear
		 * this status when PF has initialized done.
		 */
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
				  val | HCLGEVF_VF_RST_ING_BIT);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		/* for revision 0x21, clearing interrupt is writing bit 0
		 * to the clear register, writing bit 1 means to keep the
		 * old value.
		 * for revision 0x20, the clear register is a read & write
		 * register, so we should just write 0 to the bit we are
		 * handling, and keep other bits as cmdq_stat_reg.
		 */
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		else
			*clearval = cmdq_stat_reg &
				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);

		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
		 cmdq_stat_reg);

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

/* vector 0 interrupt handler: demultiplexes reset and mailbox events.
 * The vector is masked for the duration of the handler and re-enabled
 * only for known event causes.
 */
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
		hclgevf_clear_event_cause(hdev, clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause !=
	    HCLGEVF_VECTOR0_EVENT_OTHER)
		hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

/* hclgevf_configure - fetch the VF's configuration from the PF over the
 * mailbox: basic info, port based VLAN state, queue info/depth and
 * media type. Returns 0 or a negative errno from the first failed step.
 */
static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->gro_en = true;

	ret = hclgevf_get_basic_info(hdev);
	if (ret)
		return ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	return hclgevf_get_pf_media_type(hdev);
}

/* allocate the device-managed hclgevf_dev and link it with the ae_dev */
static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

/* populate the RoCE handle's vector and base-address information from
 * the nic handle; fails with -EINVAL if not enough MSI vectors remain
 */
static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_msix_offset;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

/* push the cached hdev->gro_en state to hardware; no-op when the device
 * does not support GRO
 */
static int hclgevf_config_gro(struct hclgevf_dev *hdev)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

/* hclgevf_rss_init_cfg - initialize the software RSS configuration:
 * hash algorithm, hash key, tuple sets (V2+ hardware only) and the
 * default round-robin indirection table.
 */
static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclge_comm_rss_tuple_cfg *tuple_sets;
	u32 i;

	rss_cfg->rss_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
	tuple_sets = &rss_cfg->rss_tuple_sets;
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		u16 *rss_ind_tbl;

		rss_cfg->rss_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;

		rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
					   sizeof(*rss_ind_tbl), GFP_KERNEL);
		if (!rss_ind_tbl)
			return -ENOMEM;

		rss_cfg->rss_indirection_tbl = rss_ind_tbl;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_sctp_en =
			hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
			HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
			HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	}

	/* Initialize RSS indirect table
	 * NOTE(review): rss_indirection_tbl is only allocated in the
	 * >= V2 branch above, yet this loop writes it unconditionally —
	 * confirm that < V2 devices get the table from elsewhere.
	 */
	for (i = 0; i < rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;

	return 0;
}

/* hclgevf_rss_init_hw - program the software RSS configuration into
 * hardware: hash key and input tuples (V2+ only), then the indirection
 * table and TC mode.
 */
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->rss_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
}
2633e2cb1decSSalil Mehta static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2634e2cb1decSSalil Mehta { 2635bbfd4506SJian Shen struct hnae3_handle *nic = &hdev->nic; 2636bbfd4506SJian Shen int ret; 2637bbfd4506SJian Shen 2638bbfd4506SJian Shen ret = hclgevf_en_hw_strip_rxvtag(nic, true); 2639bbfd4506SJian Shen if (ret) { 2640bbfd4506SJian Shen dev_err(&hdev->pdev->dev, 2641bbfd4506SJian Shen "failed to enable rx vlan offload, ret = %d\n", ret); 2642bbfd4506SJian Shen return ret; 2643bbfd4506SJian Shen } 2644bbfd4506SJian Shen 2645e2cb1decSSalil Mehta return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2646e2cb1decSSalil Mehta false); 2647e2cb1decSSalil Mehta } 2648e2cb1decSSalil Mehta 2649ff200099SYunsheng Lin static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2650ff200099SYunsheng Lin { 2651ff200099SYunsheng Lin #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2652ff200099SYunsheng Lin 2653ff200099SYunsheng Lin unsigned long last = hdev->serv_processed_cnt; 2654ff200099SYunsheng Lin int i = 0; 2655ff200099SYunsheng Lin 2656ff200099SYunsheng Lin while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2657ff200099SYunsheng Lin i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2658ff200099SYunsheng Lin last == hdev->serv_processed_cnt) 2659ff200099SYunsheng Lin usleep_range(1, 1); 2660ff200099SYunsheng Lin } 2661ff200099SYunsheng Lin 26628cdb992fSJian Shen static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 26638cdb992fSJian Shen { 26648cdb992fSJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 26658cdb992fSJian Shen 26668cdb992fSJian Shen if (enable) { 2667ff200099SYunsheng Lin hclgevf_task_schedule(hdev, 0); 26688cdb992fSJian Shen } else { 2669b3c3fe8eSYunsheng Lin set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2670ff200099SYunsheng Lin 2671ff200099SYunsheng Lin /* flush memory to make sure DOWN is seen by service task */ 2672ff200099SYunsheng Lin smp_mb__before_atomic(); 2673ff200099SYunsheng Lin 
hclgevf_flush_link_update(hdev); 26748cdb992fSJian Shen } 26758cdb992fSJian Shen } 26768cdb992fSJian Shen 2677e2cb1decSSalil Mehta static int hclgevf_ae_start(struct hnae3_handle *handle) 2678e2cb1decSSalil Mehta { 2679e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2680e2cb1decSSalil Mehta 2681ed7bedd2SGuangbin Huang clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 268201305e16SGuangbin Huang clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); 2683ed7bedd2SGuangbin Huang 2684e2cb1decSSalil Mehta hclgevf_reset_tqp_stats(handle); 2685e2cb1decSSalil Mehta 2686e2cb1decSSalil Mehta hclgevf_request_link_info(hdev); 2687e2cb1decSSalil Mehta 26889194d18bSliuzhongzhu hclgevf_update_link_mode(hdev); 26899194d18bSliuzhongzhu 2690e2cb1decSSalil Mehta return 0; 2691e2cb1decSSalil Mehta } 2692e2cb1decSSalil Mehta 2693e2cb1decSSalil Mehta static void hclgevf_ae_stop(struct hnae3_handle *handle) 2694e2cb1decSSalil Mehta { 2695e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2696e2cb1decSSalil Mehta 26972f7e4896SFuyun Liang set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 26982f7e4896SFuyun Liang 2699146e92c1SHuazhong Tan if (hdev->reset_type != HNAE3_VF_RESET) 27008fa86551SYufeng Mo hclgevf_reset_tqp(handle); 270139cfbc9cSHuazhong Tan 2702e2cb1decSSalil Mehta hclgevf_reset_tqp_stats(handle); 27038cc6c1f7SFuyun Liang hclgevf_update_link_status(hdev, 0); 2704e2cb1decSSalil Mehta } 2705e2cb1decSSalil Mehta 2706a6d818e3SYunsheng Lin static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2707a6d818e3SYunsheng Lin { 2708d3410018SYufeng Mo #define HCLGEVF_STATE_ALIVE 1 2709d3410018SYufeng Mo #define HCLGEVF_STATE_NOT_ALIVE 0 2710a6d818e3SYunsheng Lin 2711d3410018SYufeng Mo struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2712d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 2713d3410018SYufeng Mo 2714d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2715d3410018SYufeng Mo 
send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2716d3410018SYufeng Mo HCLGEVF_STATE_NOT_ALIVE; 2717d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2718a6d818e3SYunsheng Lin } 2719a6d818e3SYunsheng Lin 2720a6d818e3SYunsheng Lin static int hclgevf_client_start(struct hnae3_handle *handle) 2721a6d818e3SYunsheng Lin { 2722f621df96SQinglang Miao return hclgevf_set_alive(handle, true); 2723a6d818e3SYunsheng Lin } 2724a6d818e3SYunsheng Lin 2725a6d818e3SYunsheng Lin static void hclgevf_client_stop(struct hnae3_handle *handle) 2726a6d818e3SYunsheng Lin { 2727a6d818e3SYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2728a6d818e3SYunsheng Lin int ret; 2729a6d818e3SYunsheng Lin 2730a6d818e3SYunsheng Lin ret = hclgevf_set_alive(handle, false); 2731a6d818e3SYunsheng Lin if (ret) 2732a6d818e3SYunsheng Lin dev_warn(&hdev->pdev->dev, 2733a6d818e3SYunsheng Lin "%s failed %d\n", __func__, ret); 2734a6d818e3SYunsheng Lin } 2735a6d818e3SYunsheng Lin 2736e2cb1decSSalil Mehta static void hclgevf_state_init(struct hclgevf_dev *hdev) 2737e2cb1decSSalil Mehta { 2738e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2739e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2740d5432455SGuojia Liao clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2741e2cb1decSSalil Mehta 2742b3c3fe8eSYunsheng Lin INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 274335a1e503SSalil Mehta 2744e2cb1decSSalil Mehta mutex_init(&hdev->mbx_resp.mbx_mutex); 2745f28368bbSHuazhong Tan sema_init(&hdev->reset_sem, 1); 2746e2cb1decSSalil Mehta 2747ee4bcd3bSJian Shen spin_lock_init(&hdev->mac_table.mac_list_lock); 2748ee4bcd3bSJian Shen INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2749ee4bcd3bSJian Shen INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2750ee4bcd3bSJian Shen 2751e2cb1decSSalil Mehta /* bring the device down */ 2752e2cb1decSSalil Mehta set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2753e2cb1decSSalil 
Mehta } 2754e2cb1decSSalil Mehta 2755e2cb1decSSalil Mehta static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2756e2cb1decSSalil Mehta { 2757e2cb1decSSalil Mehta set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2758acfc3d55SHuazhong Tan set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2759e2cb1decSSalil Mehta 2760b3c3fe8eSYunsheng Lin if (hdev->service_task.work.func) 2761b3c3fe8eSYunsheng Lin cancel_delayed_work_sync(&hdev->service_task); 2762e2cb1decSSalil Mehta 2763e2cb1decSSalil Mehta mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2764e2cb1decSSalil Mehta } 2765e2cb1decSSalil Mehta 2766e2cb1decSSalil Mehta static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2767e2cb1decSSalil Mehta { 2768e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 2769e2cb1decSSalil Mehta int vectors; 2770e2cb1decSSalil Mehta int i; 2771e2cb1decSSalil Mehta 2772580a05f9SYonglong Liu if (hnae3_dev_roce_supported(hdev)) 277307acf909SJian Shen vectors = pci_alloc_irq_vectors(pdev, 277407acf909SJian Shen hdev->roce_base_msix_offset + 1, 277507acf909SJian Shen hdev->num_msi, 277607acf909SJian Shen PCI_IRQ_MSIX); 277707acf909SJian Shen else 2778580a05f9SYonglong Liu vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2779580a05f9SYonglong Liu hdev->num_msi, 2780e2cb1decSSalil Mehta PCI_IRQ_MSI | PCI_IRQ_MSIX); 278107acf909SJian Shen 2782e2cb1decSSalil Mehta if (vectors < 0) { 2783e2cb1decSSalil Mehta dev_err(&pdev->dev, 2784e2cb1decSSalil Mehta "failed(%d) to allocate MSI/MSI-X vectors\n", 2785e2cb1decSSalil Mehta vectors); 2786e2cb1decSSalil Mehta return vectors; 2787e2cb1decSSalil Mehta } 2788e2cb1decSSalil Mehta if (vectors < hdev->num_msi) 2789e2cb1decSSalil Mehta dev_warn(&hdev->pdev->dev, 2790adcf738bSGuojia Liao "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2791e2cb1decSSalil Mehta hdev->num_msi, vectors); 2792e2cb1decSSalil Mehta 2793e2cb1decSSalil Mehta hdev->num_msi = vectors; 2794e2cb1decSSalil Mehta hdev->num_msi_left = vectors; 2795580a05f9SYonglong Liu 
2796e2cb1decSSalil Mehta hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2797e2cb1decSSalil Mehta sizeof(u16), GFP_KERNEL); 2798e2cb1decSSalil Mehta if (!hdev->vector_status) { 2799e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 2800e2cb1decSSalil Mehta return -ENOMEM; 2801e2cb1decSSalil Mehta } 2802e2cb1decSSalil Mehta 2803e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++) 2804e2cb1decSSalil Mehta hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2805e2cb1decSSalil Mehta 2806e2cb1decSSalil Mehta hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2807e2cb1decSSalil Mehta sizeof(int), GFP_KERNEL); 2808e2cb1decSSalil Mehta if (!hdev->vector_irq) { 2809862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_status); 2810e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 2811e2cb1decSSalil Mehta return -ENOMEM; 2812e2cb1decSSalil Mehta } 2813e2cb1decSSalil Mehta 2814e2cb1decSSalil Mehta return 0; 2815e2cb1decSSalil Mehta } 2816e2cb1decSSalil Mehta 2817e2cb1decSSalil Mehta static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2818e2cb1decSSalil Mehta { 2819e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 2820e2cb1decSSalil Mehta 2821862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_status); 2822862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_irq); 2823e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 2824e2cb1decSSalil Mehta } 2825e2cb1decSSalil Mehta 2826e2cb1decSSalil Mehta static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2827e2cb1decSSalil Mehta { 2828cdd332acSGuojia Liao int ret; 2829e2cb1decSSalil Mehta 2830e2cb1decSSalil Mehta hclgevf_get_misc_vector(hdev); 2831e2cb1decSSalil Mehta 2832f97c4d82SYonglong Liu snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2833f97c4d82SYonglong Liu HCLGEVF_NAME, pci_name(hdev->pdev)); 2834e2cb1decSSalil Mehta ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2835f97c4d82SYonglong Liu 0, hdev->misc_vector.name, hdev); 
2836e2cb1decSSalil Mehta if (ret) { 2837e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2838e2cb1decSSalil Mehta hdev->misc_vector.vector_irq); 2839e2cb1decSSalil Mehta return ret; 2840e2cb1decSSalil Mehta } 2841e2cb1decSSalil Mehta 28421819e409SXi Wang hclgevf_clear_event_cause(hdev, 0); 28431819e409SXi Wang 2844e2cb1decSSalil Mehta /* enable misc. vector(vector 0) */ 2845e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, true); 2846e2cb1decSSalil Mehta 2847e2cb1decSSalil Mehta return ret; 2848e2cb1decSSalil Mehta } 2849e2cb1decSSalil Mehta 2850e2cb1decSSalil Mehta static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2851e2cb1decSSalil Mehta { 2852e2cb1decSSalil Mehta /* disable misc vector(vector 0) */ 2853e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, false); 28541819e409SXi Wang synchronize_irq(hdev->misc_vector.vector_irq); 2855e2cb1decSSalil Mehta free_irq(hdev->misc_vector.vector_irq, hdev); 2856e2cb1decSSalil Mehta hclgevf_free_vector(hdev, 0); 2857e2cb1decSSalil Mehta } 2858e2cb1decSSalil Mehta 2859bb87be87SYonglong Liu static void hclgevf_info_show(struct hclgevf_dev *hdev) 2860bb87be87SYonglong Liu { 2861bb87be87SYonglong Liu struct device *dev = &hdev->pdev->dev; 2862bb87be87SYonglong Liu 2863bb87be87SYonglong Liu dev_info(dev, "VF info begin:\n"); 2864bb87be87SYonglong Liu 2865adcf738bSGuojia Liao dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2866adcf738bSGuojia Liao dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2867adcf738bSGuojia Liao dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2868adcf738bSGuojia Liao dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2869adcf738bSGuojia Liao dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2870adcf738bSGuojia Liao dev_info(dev, "PF media type of this VF: %u\n", 2871bb87be87SYonglong Liu hdev->hw.mac.media_type); 2872bb87be87SYonglong Liu 2873bb87be87SYonglong Liu 
dev_info(dev, "VF info end.\n"); 2874bb87be87SYonglong Liu } 2875bb87be87SYonglong Liu 28761db58f86SHuazhong Tan static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 28771db58f86SHuazhong Tan struct hnae3_client *client) 28781db58f86SHuazhong Tan { 28791db58f86SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 28804cd5beaaSGuangbin Huang int rst_cnt = hdev->rst_stats.rst_cnt; 28811db58f86SHuazhong Tan int ret; 28821db58f86SHuazhong Tan 28831db58f86SHuazhong Tan ret = client->ops->init_instance(&hdev->nic); 28841db58f86SHuazhong Tan if (ret) 28851db58f86SHuazhong Tan return ret; 28861db58f86SHuazhong Tan 28871db58f86SHuazhong Tan set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 28884cd5beaaSGuangbin Huang if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 28894cd5beaaSGuangbin Huang rst_cnt != hdev->rst_stats.rst_cnt) { 28904cd5beaaSGuangbin Huang clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 28914cd5beaaSGuangbin Huang 28924cd5beaaSGuangbin Huang client->ops->uninit_instance(&hdev->nic, 0); 28934cd5beaaSGuangbin Huang return -EBUSY; 28944cd5beaaSGuangbin Huang } 28954cd5beaaSGuangbin Huang 28961db58f86SHuazhong Tan hnae3_set_client_init_flag(client, ae_dev, 1); 28971db58f86SHuazhong Tan 28981db58f86SHuazhong Tan if (netif_msg_drv(&hdev->nic)) 28991db58f86SHuazhong Tan hclgevf_info_show(hdev); 29001db58f86SHuazhong Tan 29011db58f86SHuazhong Tan return 0; 29021db58f86SHuazhong Tan } 29031db58f86SHuazhong Tan 29041db58f86SHuazhong Tan static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 29051db58f86SHuazhong Tan struct hnae3_client *client) 29061db58f86SHuazhong Tan { 29071db58f86SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 29081db58f86SHuazhong Tan int ret; 29091db58f86SHuazhong Tan 29101db58f86SHuazhong Tan if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 29111db58f86SHuazhong Tan !hdev->nic_client) 29121db58f86SHuazhong Tan return 0; 29131db58f86SHuazhong Tan 
29141db58f86SHuazhong Tan ret = hclgevf_init_roce_base_info(hdev); 29151db58f86SHuazhong Tan if (ret) 29161db58f86SHuazhong Tan return ret; 29171db58f86SHuazhong Tan 29181db58f86SHuazhong Tan ret = client->ops->init_instance(&hdev->roce); 29191db58f86SHuazhong Tan if (ret) 29201db58f86SHuazhong Tan return ret; 29211db58f86SHuazhong Tan 2922fe735c84SHuazhong Tan set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 29231db58f86SHuazhong Tan hnae3_set_client_init_flag(client, ae_dev, 1); 29241db58f86SHuazhong Tan 29251db58f86SHuazhong Tan return 0; 29261db58f86SHuazhong Tan } 29271db58f86SHuazhong Tan 2928e718a93fSPeng Li static int hclgevf_init_client_instance(struct hnae3_client *client, 2929e718a93fSPeng Li struct hnae3_ae_dev *ae_dev) 2930e2cb1decSSalil Mehta { 2931e718a93fSPeng Li struct hclgevf_dev *hdev = ae_dev->priv; 2932e2cb1decSSalil Mehta int ret; 2933e2cb1decSSalil Mehta 2934e2cb1decSSalil Mehta switch (client->type) { 2935e2cb1decSSalil Mehta case HNAE3_CLIENT_KNIC: 2936e2cb1decSSalil Mehta hdev->nic_client = client; 2937e2cb1decSSalil Mehta hdev->nic.client = client; 2938e2cb1decSSalil Mehta 29391db58f86SHuazhong Tan ret = hclgevf_init_nic_client_instance(ae_dev, client); 2940e2cb1decSSalil Mehta if (ret) 294149dd8054SJian Shen goto clear_nic; 2942e2cb1decSSalil Mehta 29431db58f86SHuazhong Tan ret = hclgevf_init_roce_client_instance(ae_dev, 29441db58f86SHuazhong Tan hdev->roce_client); 2945e2cb1decSSalil Mehta if (ret) 294649dd8054SJian Shen goto clear_roce; 2947d9f28fc2SJian Shen 2948e2cb1decSSalil Mehta break; 2949e2cb1decSSalil Mehta case HNAE3_CLIENT_ROCE: 2950544a7bcdSLijun Ou if (hnae3_dev_roce_supported(hdev)) { 2951e2cb1decSSalil Mehta hdev->roce_client = client; 2952e2cb1decSSalil Mehta hdev->roce.client = client; 2953544a7bcdSLijun Ou } 2954e2cb1decSSalil Mehta 29551db58f86SHuazhong Tan ret = hclgevf_init_roce_client_instance(ae_dev, client); 2956e2cb1decSSalil Mehta if (ret) 295749dd8054SJian Shen goto clear_roce; 2958e2cb1decSSalil Mehta 
2959fa7a4bd5SJian Shen break; 2960fa7a4bd5SJian Shen default: 2961fa7a4bd5SJian Shen return -EINVAL; 2962e2cb1decSSalil Mehta } 2963e2cb1decSSalil Mehta 2964e2cb1decSSalil Mehta return 0; 296549dd8054SJian Shen 296649dd8054SJian Shen clear_nic: 296749dd8054SJian Shen hdev->nic_client = NULL; 296849dd8054SJian Shen hdev->nic.client = NULL; 296949dd8054SJian Shen return ret; 297049dd8054SJian Shen clear_roce: 297149dd8054SJian Shen hdev->roce_client = NULL; 297249dd8054SJian Shen hdev->roce.client = NULL; 297349dd8054SJian Shen return ret; 2974e2cb1decSSalil Mehta } 2975e2cb1decSSalil Mehta 2976e718a93fSPeng Li static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2977e718a93fSPeng Li struct hnae3_ae_dev *ae_dev) 2978e2cb1decSSalil Mehta { 2979e718a93fSPeng Li struct hclgevf_dev *hdev = ae_dev->priv; 2980e718a93fSPeng Li 2981e2cb1decSSalil Mehta /* un-init roce, if it exists */ 298249dd8054SJian Shen if (hdev->roce_client) { 2983e140c798SYufeng Mo while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2984e140c798SYufeng Mo msleep(HCLGEVF_WAIT_RESET_DONE); 2985fe735c84SHuazhong Tan clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2986e140c798SYufeng Mo 2987e2cb1decSSalil Mehta hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 298849dd8054SJian Shen hdev->roce_client = NULL; 298949dd8054SJian Shen hdev->roce.client = NULL; 299049dd8054SJian Shen } 2991e2cb1decSSalil Mehta 2992e2cb1decSSalil Mehta /* un-init nic/unic, if this was not called by roce client */ 299349dd8054SJian Shen if (client->ops->uninit_instance && hdev->nic_client && 299449dd8054SJian Shen client->type != HNAE3_CLIENT_ROCE) { 2995e140c798SYufeng Mo while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2996e140c798SYufeng Mo msleep(HCLGEVF_WAIT_RESET_DONE); 299725d1817cSHuazhong Tan clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 299825d1817cSHuazhong Tan 2999e2cb1decSSalil Mehta client->ops->uninit_instance(&hdev->nic, 0); 300049dd8054SJian Shen 
hdev->nic_client = NULL; 300149dd8054SJian Shen hdev->nic.client = NULL; 300249dd8054SJian Shen } 3003e2cb1decSSalil Mehta } 3004e2cb1decSSalil Mehta 300530ae7f8aSHuazhong Tan static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) 300630ae7f8aSHuazhong Tan { 300730ae7f8aSHuazhong Tan #define HCLGEVF_MEM_BAR 4 300830ae7f8aSHuazhong Tan 300930ae7f8aSHuazhong Tan struct pci_dev *pdev = hdev->pdev; 301030ae7f8aSHuazhong Tan struct hclgevf_hw *hw = &hdev->hw; 301130ae7f8aSHuazhong Tan 301230ae7f8aSHuazhong Tan /* for device does not have device memory, return directly */ 301330ae7f8aSHuazhong Tan if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) 301430ae7f8aSHuazhong Tan return 0; 301530ae7f8aSHuazhong Tan 3016076bb537SJie Wang hw->hw.mem_base = 3017076bb537SJie Wang devm_ioremap_wc(&pdev->dev, 3018076bb537SJie Wang pci_resource_start(pdev, HCLGEVF_MEM_BAR), 301930ae7f8aSHuazhong Tan pci_resource_len(pdev, HCLGEVF_MEM_BAR)); 3020076bb537SJie Wang if (!hw->hw.mem_base) { 3021be419fcaSColin Ian King dev_err(&pdev->dev, "failed to map device memory\n"); 302230ae7f8aSHuazhong Tan return -EFAULT; 302330ae7f8aSHuazhong Tan } 302430ae7f8aSHuazhong Tan 302530ae7f8aSHuazhong Tan return 0; 302630ae7f8aSHuazhong Tan } 302730ae7f8aSHuazhong Tan 3028e2cb1decSSalil Mehta static int hclgevf_pci_init(struct hclgevf_dev *hdev) 3029e2cb1decSSalil Mehta { 3030e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 3031e2cb1decSSalil Mehta struct hclgevf_hw *hw; 3032e2cb1decSSalil Mehta int ret; 3033e2cb1decSSalil Mehta 3034e2cb1decSSalil Mehta ret = pci_enable_device(pdev); 3035e2cb1decSSalil Mehta if (ret) { 3036e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed to enable PCI device\n"); 30373e249d3bSFuyun Liang return ret; 3038e2cb1decSSalil Mehta } 3039e2cb1decSSalil Mehta 3040e2cb1decSSalil Mehta ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3041e2cb1decSSalil Mehta if (ret) { 3042e2cb1decSSalil Mehta dev_err(&pdev->dev, "can't set consistent PCI 
DMA, exiting"); 3043e2cb1decSSalil Mehta goto err_disable_device; 3044e2cb1decSSalil Mehta } 3045e2cb1decSSalil Mehta 3046e2cb1decSSalil Mehta ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 3047e2cb1decSSalil Mehta if (ret) { 3048e2cb1decSSalil Mehta dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 3049e2cb1decSSalil Mehta goto err_disable_device; 3050e2cb1decSSalil Mehta } 3051e2cb1decSSalil Mehta 3052e2cb1decSSalil Mehta pci_set_master(pdev); 3053e2cb1decSSalil Mehta hw = &hdev->hw; 3054076bb537SJie Wang hw->hw.io_base = pci_iomap(pdev, 2, 0); 3055076bb537SJie Wang if (!hw->hw.io_base) { 3056e2cb1decSSalil Mehta dev_err(&pdev->dev, "can't map configuration register space\n"); 3057e2cb1decSSalil Mehta ret = -ENOMEM; 3058e2cb1decSSalil Mehta goto err_clr_master; 3059e2cb1decSSalil Mehta } 3060e2cb1decSSalil Mehta 306130ae7f8aSHuazhong Tan ret = hclgevf_dev_mem_map(hdev); 306230ae7f8aSHuazhong Tan if (ret) 306330ae7f8aSHuazhong Tan goto err_unmap_io_base; 306430ae7f8aSHuazhong Tan 3065e2cb1decSSalil Mehta return 0; 3066e2cb1decSSalil Mehta 306730ae7f8aSHuazhong Tan err_unmap_io_base: 3068076bb537SJie Wang pci_iounmap(pdev, hdev->hw.hw.io_base); 3069e2cb1decSSalil Mehta err_clr_master: 3070e2cb1decSSalil Mehta pci_clear_master(pdev); 3071e2cb1decSSalil Mehta pci_release_regions(pdev); 3072e2cb1decSSalil Mehta err_disable_device: 3073e2cb1decSSalil Mehta pci_disable_device(pdev); 30743e249d3bSFuyun Liang 3075e2cb1decSSalil Mehta return ret; 3076e2cb1decSSalil Mehta } 3077e2cb1decSSalil Mehta 3078e2cb1decSSalil Mehta static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 3079e2cb1decSSalil Mehta { 3080e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 3081e2cb1decSSalil Mehta 3082076bb537SJie Wang if (hdev->hw.hw.mem_base) 3083076bb537SJie Wang devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); 308430ae7f8aSHuazhong Tan 3085076bb537SJie Wang pci_iounmap(pdev, hdev->hw.hw.io_base); 3086e2cb1decSSalil Mehta pci_clear_master(pdev); 
3087e2cb1decSSalil Mehta pci_release_regions(pdev); 3088e2cb1decSSalil Mehta pci_disable_device(pdev); 3089e2cb1decSSalil Mehta } 3090e2cb1decSSalil Mehta 309107acf909SJian Shen static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 309207acf909SJian Shen { 309307acf909SJian Shen struct hclgevf_query_res_cmd *req; 30946befad60SJie Wang struct hclge_desc desc; 309507acf909SJian Shen int ret; 309607acf909SJian Shen 309707acf909SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 309807acf909SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 309907acf909SJian Shen if (ret) { 310007acf909SJian Shen dev_err(&hdev->pdev->dev, 310107acf909SJian Shen "query vf resource failed, ret = %d.\n", ret); 310207acf909SJian Shen return ret; 310307acf909SJian Shen } 310407acf909SJian Shen 310507acf909SJian Shen req = (struct hclgevf_query_res_cmd *)desc.data; 310607acf909SJian Shen 3107580a05f9SYonglong Liu if (hnae3_dev_roce_supported(hdev)) { 310807acf909SJian Shen hdev->roce_base_msix_offset = 310960df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 311007acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_M, 311107acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_S); 311207acf909SJian Shen hdev->num_roce_msix = 311360df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 311407acf909SJian Shen HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 311507acf909SJian Shen 3116580a05f9SYonglong Liu /* nic's msix numbers is always equals to the roce's. */ 3117580a05f9SYonglong Liu hdev->num_nic_msix = hdev->num_roce_msix; 3118580a05f9SYonglong Liu 311907acf909SJian Shen /* VF should have NIC vectors and Roce vectors, NIC vectors 312007acf909SJian Shen * are queued before Roce vectors. The offset is fixed to 64. 
312107acf909SJian Shen */ 312207acf909SJian Shen hdev->num_msi = hdev->num_roce_msix + 312307acf909SJian Shen hdev->roce_base_msix_offset; 312407acf909SJian Shen } else { 312507acf909SJian Shen hdev->num_msi = 312660df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 312707acf909SJian Shen HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3128580a05f9SYonglong Liu 3129580a05f9SYonglong Liu hdev->num_nic_msix = hdev->num_msi; 3130580a05f9SYonglong Liu } 3131580a05f9SYonglong Liu 3132580a05f9SYonglong Liu if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 3133580a05f9SYonglong Liu dev_err(&hdev->pdev->dev, 3134580a05f9SYonglong Liu "Just %u msi resources, not enough for vf(min:2).\n", 3135580a05f9SYonglong Liu hdev->num_nic_msix); 3136580a05f9SYonglong Liu return -EINVAL; 313707acf909SJian Shen } 313807acf909SJian Shen 313907acf909SJian Shen return 0; 314007acf909SJian Shen } 314107acf909SJian Shen 3142af2aedc5SGuangbin Huang static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 3143af2aedc5SGuangbin Huang { 3144af2aedc5SGuangbin Huang #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 3145af2aedc5SGuangbin Huang 3146af2aedc5SGuangbin Huang struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3147af2aedc5SGuangbin Huang 3148af2aedc5SGuangbin Huang ae_dev->dev_specs.max_non_tso_bd_num = 3149af2aedc5SGuangbin Huang HCLGEVF_MAX_NON_TSO_BD_NUM; 3150af2aedc5SGuangbin Huang ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3151af2aedc5SGuangbin Huang ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3152ab16b49cSHuazhong Tan ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3153e070c8b9SYufeng Mo ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3154af2aedc5SGuangbin Huang } 3155af2aedc5SGuangbin Huang 3156af2aedc5SGuangbin Huang static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 31576befad60SJie Wang struct hclge_desc *desc) 3158af2aedc5SGuangbin Huang { 3159af2aedc5SGuangbin Huang struct hnae3_ae_dev *ae_dev = 
pci_get_drvdata(hdev->pdev); 3160af2aedc5SGuangbin Huang struct hclgevf_dev_specs_0_cmd *req0; 3161ab16b49cSHuazhong Tan struct hclgevf_dev_specs_1_cmd *req1; 3162af2aedc5SGuangbin Huang 3163af2aedc5SGuangbin Huang req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 3164ab16b49cSHuazhong Tan req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 3165af2aedc5SGuangbin Huang 3166af2aedc5SGuangbin Huang ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 3167af2aedc5SGuangbin Huang ae_dev->dev_specs.rss_ind_tbl_size = 3168af2aedc5SGuangbin Huang le16_to_cpu(req0->rss_ind_tbl_size); 316991bfae25SHuazhong Tan ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 3170af2aedc5SGuangbin Huang ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 3171ab16b49cSHuazhong Tan ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 3172e070c8b9SYufeng Mo ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 3173af2aedc5SGuangbin Huang } 3174af2aedc5SGuangbin Huang 317513297028SGuangbin Huang static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 317613297028SGuangbin Huang { 317713297028SGuangbin Huang struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 317813297028SGuangbin Huang 317913297028SGuangbin Huang if (!dev_specs->max_non_tso_bd_num) 318013297028SGuangbin Huang dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 318113297028SGuangbin Huang if (!dev_specs->rss_ind_tbl_size) 318213297028SGuangbin Huang dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 318313297028SGuangbin Huang if (!dev_specs->rss_key_size) 318413297028SGuangbin Huang dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3185ab16b49cSHuazhong Tan if (!dev_specs->max_int_gl) 3186ab16b49cSHuazhong Tan dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3187e070c8b9SYufeng Mo if (!dev_specs->max_frm_size) 3188e070c8b9SYufeng Mo dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; 318913297028SGuangbin Huang } 319013297028SGuangbin Huang 
3191af2aedc5SGuangbin Huang static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 3192af2aedc5SGuangbin Huang { 31936befad60SJie Wang struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 3194af2aedc5SGuangbin Huang int ret; 3195af2aedc5SGuangbin Huang int i; 3196af2aedc5SGuangbin Huang 3197af2aedc5SGuangbin Huang /* set default specifications as devices lower than version V3 do not 3198af2aedc5SGuangbin Huang * support querying specifications from firmware. 3199af2aedc5SGuangbin Huang */ 3200af2aedc5SGuangbin Huang if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 3201af2aedc5SGuangbin Huang hclgevf_set_default_dev_specs(hdev); 3202af2aedc5SGuangbin Huang return 0; 3203af2aedc5SGuangbin Huang } 3204af2aedc5SGuangbin Huang 3205af2aedc5SGuangbin Huang for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3206af2aedc5SGuangbin Huang hclgevf_cmd_setup_basic_desc(&desc[i], 3207af2aedc5SGuangbin Huang HCLGEVF_OPC_QUERY_DEV_SPECS, true); 3208cb413bfaSJie Wang desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3209af2aedc5SGuangbin Huang } 3210af2aedc5SGuangbin Huang hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, 3211af2aedc5SGuangbin Huang true); 3212af2aedc5SGuangbin Huang 3213af2aedc5SGuangbin Huang ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 3214af2aedc5SGuangbin Huang if (ret) 3215af2aedc5SGuangbin Huang return ret; 3216af2aedc5SGuangbin Huang 3217af2aedc5SGuangbin Huang hclgevf_parse_dev_specs(hdev, desc); 321813297028SGuangbin Huang hclgevf_check_dev_specs(hdev); 3219af2aedc5SGuangbin Huang 3220af2aedc5SGuangbin Huang return 0; 3221af2aedc5SGuangbin Huang } 3222af2aedc5SGuangbin Huang 3223862d969aSHuazhong Tan static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 3224862d969aSHuazhong Tan { 3225862d969aSHuazhong Tan struct pci_dev *pdev = hdev->pdev; 3226862d969aSHuazhong Tan int ret = 0; 3227862d969aSHuazhong Tan 3228862d969aSHuazhong Tan if (hdev->reset_type == HNAE3_VF_FULL_RESET && 
3229862d969aSHuazhong Tan test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3230862d969aSHuazhong Tan hclgevf_misc_irq_uninit(hdev); 3231862d969aSHuazhong Tan hclgevf_uninit_msi(hdev); 3232862d969aSHuazhong Tan clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3233862d969aSHuazhong Tan } 3234862d969aSHuazhong Tan 3235862d969aSHuazhong Tan if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3236862d969aSHuazhong Tan pci_set_master(pdev); 3237862d969aSHuazhong Tan ret = hclgevf_init_msi(hdev); 3238862d969aSHuazhong Tan if (ret) { 3239862d969aSHuazhong Tan dev_err(&pdev->dev, 3240862d969aSHuazhong Tan "failed(%d) to init MSI/MSI-X\n", ret); 3241862d969aSHuazhong Tan return ret; 3242862d969aSHuazhong Tan } 3243862d969aSHuazhong Tan 3244862d969aSHuazhong Tan ret = hclgevf_misc_irq_init(hdev); 3245862d969aSHuazhong Tan if (ret) { 3246862d969aSHuazhong Tan hclgevf_uninit_msi(hdev); 3247862d969aSHuazhong Tan dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 3248862d969aSHuazhong Tan ret); 3249862d969aSHuazhong Tan return ret; 3250862d969aSHuazhong Tan } 3251862d969aSHuazhong Tan 3252862d969aSHuazhong Tan set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3253862d969aSHuazhong Tan } 3254862d969aSHuazhong Tan 3255862d969aSHuazhong Tan return ret; 3256862d969aSHuazhong Tan } 3257862d969aSHuazhong Tan 3258039ba863SJian Shen static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 3259039ba863SJian Shen { 3260039ba863SJian Shen struct hclge_vf_to_pf_msg send_msg; 3261039ba863SJian Shen 3262039ba863SJian Shen hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 3263039ba863SJian Shen HCLGE_MBX_VPORT_LIST_CLEAR); 3264039ba863SJian Shen return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3265039ba863SJian Shen } 3266039ba863SJian Shen 326779664077SHuazhong Tan static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) 326879664077SHuazhong Tan { 326979664077SHuazhong Tan if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 
327079664077SHuazhong Tan hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); 327179664077SHuazhong Tan } 327279664077SHuazhong Tan 327379664077SHuazhong Tan static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) 327479664077SHuazhong Tan { 327579664077SHuazhong Tan if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 327679664077SHuazhong Tan hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); 327779664077SHuazhong Tan } 327879664077SHuazhong Tan 32799c6f7085SHuazhong Tan static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 3280e2cb1decSSalil Mehta { 32817a01c897SSalil Mehta struct pci_dev *pdev = hdev->pdev; 3282e2cb1decSSalil Mehta int ret; 3283e2cb1decSSalil Mehta 3284862d969aSHuazhong Tan ret = hclgevf_pci_reset(hdev); 3285862d969aSHuazhong Tan if (ret) { 3286862d969aSHuazhong Tan dev_err(&pdev->dev, "pci reset failed %d\n", ret); 3287862d969aSHuazhong Tan return ret; 3288862d969aSHuazhong Tan } 3289862d969aSHuazhong Tan 3290cb413bfaSJie Wang hclgevf_arq_init(hdev); 3291cb413bfaSJie Wang ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 3292cb413bfaSJie Wang &hdev->fw_version, false, 3293cb413bfaSJie Wang hdev->reset_pending); 32949c6f7085SHuazhong Tan if (ret) { 32959c6f7085SHuazhong Tan dev_err(&pdev->dev, "cmd failed %d\n", ret); 32969c6f7085SHuazhong Tan return ret; 32977a01c897SSalil Mehta } 3298e2cb1decSSalil Mehta 32999c6f7085SHuazhong Tan ret = hclgevf_rss_init_hw(hdev); 33009c6f7085SHuazhong Tan if (ret) { 33019c6f7085SHuazhong Tan dev_err(&hdev->pdev->dev, 33029c6f7085SHuazhong Tan "failed(%d) to initialize RSS\n", ret); 33039c6f7085SHuazhong Tan return ret; 33049c6f7085SHuazhong Tan } 33059c6f7085SHuazhong Tan 33063462207dSYufeng Mo ret = hclgevf_config_gro(hdev); 3307b26a6feaSPeng Li if (ret) 3308b26a6feaSPeng Li return ret; 3309b26a6feaSPeng Li 33109c6f7085SHuazhong Tan ret = hclgevf_init_vlan_config(hdev); 33119c6f7085SHuazhong Tan if (ret) { 33129c6f7085SHuazhong Tan dev_err(&hdev->pdev->dev, 
33139c6f7085SHuazhong Tan "failed(%d) to initialize VLAN config\n", ret); 33149c6f7085SHuazhong Tan return ret; 33159c6f7085SHuazhong Tan } 33169c6f7085SHuazhong Tan 3317c631c696SJian Shen set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3318c631c696SJian Shen 331979664077SHuazhong Tan hclgevf_init_rxd_adv_layout(hdev); 332079664077SHuazhong Tan 33219c6f7085SHuazhong Tan dev_info(&hdev->pdev->dev, "Reset done\n"); 33229c6f7085SHuazhong Tan 33239c6f7085SHuazhong Tan return 0; 33249c6f7085SHuazhong Tan } 33259c6f7085SHuazhong Tan 33269c6f7085SHuazhong Tan static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 33279c6f7085SHuazhong Tan { 33289c6f7085SHuazhong Tan struct pci_dev *pdev = hdev->pdev; 33299c6f7085SHuazhong Tan int ret; 33309c6f7085SHuazhong Tan 3331e2cb1decSSalil Mehta ret = hclgevf_pci_init(hdev); 333260df7e91SHuazhong Tan if (ret) 3333e2cb1decSSalil Mehta return ret; 3334e2cb1decSSalil Mehta 3335cd624299SYufeng Mo ret = hclgevf_devlink_init(hdev); 3336cd624299SYufeng Mo if (ret) 3337cd624299SYufeng Mo goto err_devlink_init; 3338cd624299SYufeng Mo 3339cb413bfaSJie Wang ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); 334060df7e91SHuazhong Tan if (ret) 33418b0195a3SHuazhong Tan goto err_cmd_queue_init; 33428b0195a3SHuazhong Tan 3343cb413bfaSJie Wang hclgevf_arq_init(hdev); 3344cb413bfaSJie Wang ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 3345cb413bfaSJie Wang &hdev->fw_version, false, 3346cb413bfaSJie Wang hdev->reset_pending); 3347eddf0462SYunsheng Lin if (ret) 3348eddf0462SYunsheng Lin goto err_cmd_init; 3349eddf0462SYunsheng Lin 335007acf909SJian Shen /* Get vf resource */ 335107acf909SJian Shen ret = hclgevf_query_vf_resource(hdev); 335260df7e91SHuazhong Tan if (ret) 33538b0195a3SHuazhong Tan goto err_cmd_init; 335407acf909SJian Shen 3355af2aedc5SGuangbin Huang ret = hclgevf_query_dev_specs(hdev); 3356af2aedc5SGuangbin Huang if (ret) { 3357af2aedc5SGuangbin Huang dev_err(&pdev->dev, 3358af2aedc5SGuangbin Huang "failed to query 
dev specifications, ret = %d\n", ret); 3359af2aedc5SGuangbin Huang goto err_cmd_init; 3360af2aedc5SGuangbin Huang } 3361af2aedc5SGuangbin Huang 336207acf909SJian Shen ret = hclgevf_init_msi(hdev); 336307acf909SJian Shen if (ret) { 336407acf909SJian Shen dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 33658b0195a3SHuazhong Tan goto err_cmd_init; 336607acf909SJian Shen } 336707acf909SJian Shen 336807acf909SJian Shen hclgevf_state_init(hdev); 3369dea846e8SHuazhong Tan hdev->reset_level = HNAE3_VF_FUNC_RESET; 3370afb6afdbSHuazhong Tan hdev->reset_type = HNAE3_NONE_RESET; 337107acf909SJian Shen 3372e2cb1decSSalil Mehta ret = hclgevf_misc_irq_init(hdev); 337360df7e91SHuazhong Tan if (ret) 3374e2cb1decSSalil Mehta goto err_misc_irq_init; 3375e2cb1decSSalil Mehta 3376862d969aSHuazhong Tan set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3377862d969aSHuazhong Tan 3378e2cb1decSSalil Mehta ret = hclgevf_configure(hdev); 3379e2cb1decSSalil Mehta if (ret) { 3380e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 3381e2cb1decSSalil Mehta goto err_config; 3382e2cb1decSSalil Mehta } 3383e2cb1decSSalil Mehta 3384e2cb1decSSalil Mehta ret = hclgevf_alloc_tqps(hdev); 3385e2cb1decSSalil Mehta if (ret) { 3386e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 3387e2cb1decSSalil Mehta goto err_config; 3388e2cb1decSSalil Mehta } 3389e2cb1decSSalil Mehta 3390e2cb1decSSalil Mehta ret = hclgevf_set_handle_info(hdev); 339160df7e91SHuazhong Tan if (ret) 3392e2cb1decSSalil Mehta goto err_config; 3393e2cb1decSSalil Mehta 33943462207dSYufeng Mo ret = hclgevf_config_gro(hdev); 3395b26a6feaSPeng Li if (ret) 3396b26a6feaSPeng Li goto err_config; 3397b26a6feaSPeng Li 3398e2cb1decSSalil Mehta /* Initialize RSS for this VF */ 339987ce161eSGuangbin Huang ret = hclgevf_rss_init_cfg(hdev); 340087ce161eSGuangbin Huang if (ret) { 340187ce161eSGuangbin Huang dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 
340287ce161eSGuangbin Huang goto err_config; 340387ce161eSGuangbin Huang } 340487ce161eSGuangbin Huang 3405e2cb1decSSalil Mehta ret = hclgevf_rss_init_hw(hdev); 3406e2cb1decSSalil Mehta if (ret) { 3407e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 3408e2cb1decSSalil Mehta "failed(%d) to initialize RSS\n", ret); 3409e2cb1decSSalil Mehta goto err_config; 3410e2cb1decSSalil Mehta } 3411e2cb1decSSalil Mehta 3412039ba863SJian Shen /* ensure vf tbl list as empty before init*/ 3413039ba863SJian Shen ret = hclgevf_clear_vport_list(hdev); 3414039ba863SJian Shen if (ret) { 3415039ba863SJian Shen dev_err(&pdev->dev, 3416039ba863SJian Shen "failed to clear tbl list configuration, ret = %d.\n", 3417039ba863SJian Shen ret); 3418039ba863SJian Shen goto err_config; 3419039ba863SJian Shen } 3420039ba863SJian Shen 3421e2cb1decSSalil Mehta ret = hclgevf_init_vlan_config(hdev); 3422e2cb1decSSalil Mehta if (ret) { 3423e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 3424e2cb1decSSalil Mehta "failed(%d) to initialize VLAN config\n", ret); 3425e2cb1decSSalil Mehta goto err_config; 3426e2cb1decSSalil Mehta } 3427e2cb1decSSalil Mehta 342879664077SHuazhong Tan hclgevf_init_rxd_adv_layout(hdev); 342979664077SHuazhong Tan 34300251d196SGuangbin Huang set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); 34310251d196SGuangbin Huang 34320742ed7cSHuazhong Tan hdev->last_reset_time = jiffies; 343308d80a4cSHuazhong Tan dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 343408d80a4cSHuazhong Tan HCLGEVF_DRIVER_NAME); 3435e2cb1decSSalil Mehta 3436ff200099SYunsheng Lin hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 3437ff200099SYunsheng Lin 3438e2cb1decSSalil Mehta return 0; 3439e2cb1decSSalil Mehta 3440e2cb1decSSalil Mehta err_config: 3441e2cb1decSSalil Mehta hclgevf_misc_irq_uninit(hdev); 3442e2cb1decSSalil Mehta err_misc_irq_init: 3443e2cb1decSSalil Mehta hclgevf_state_uninit(hdev); 3444e2cb1decSSalil Mehta hclgevf_uninit_msi(hdev); 344507acf909SJian Shen err_cmd_init: 
34469970308fSJie Wang hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 34478b0195a3SHuazhong Tan err_cmd_queue_init: 3448cd624299SYufeng Mo hclgevf_devlink_uninit(hdev); 3449cd624299SYufeng Mo err_devlink_init: 3450e2cb1decSSalil Mehta hclgevf_pci_uninit(hdev); 3451862d969aSHuazhong Tan clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3452e2cb1decSSalil Mehta return ret; 3453e2cb1decSSalil Mehta } 3454e2cb1decSSalil Mehta 34557a01c897SSalil Mehta static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 3456e2cb1decSSalil Mehta { 3457d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 3458d3410018SYufeng Mo 3459e2cb1decSSalil Mehta hclgevf_state_uninit(hdev); 346079664077SHuazhong Tan hclgevf_uninit_rxd_adv_layout(hdev); 3461862d969aSHuazhong Tan 3462d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3463d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 346423b4201dSJian Shen 3465862d969aSHuazhong Tan if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3466eddf0462SYunsheng Lin hclgevf_misc_irq_uninit(hdev); 3467e2cb1decSSalil Mehta hclgevf_uninit_msi(hdev); 34687a01c897SSalil Mehta } 34697a01c897SSalil Mehta 34709970308fSJie Wang hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 3471cd624299SYufeng Mo hclgevf_devlink_uninit(hdev); 3472e3364c5fSZenghui Yu hclgevf_pci_uninit(hdev); 3473ee4bcd3bSJian Shen hclgevf_uninit_mac_list(hdev); 3474862d969aSHuazhong Tan } 3475862d969aSHuazhong Tan 34767a01c897SSalil Mehta static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 34777a01c897SSalil Mehta { 34787a01c897SSalil Mehta struct pci_dev *pdev = ae_dev->pdev; 34797a01c897SSalil Mehta int ret; 34807a01c897SSalil Mehta 34817a01c897SSalil Mehta ret = hclgevf_alloc_hdev(ae_dev); 34827a01c897SSalil Mehta if (ret) { 34837a01c897SSalil Mehta dev_err(&pdev->dev, "hclge device allocation failed\n"); 34847a01c897SSalil Mehta return ret; 34857a01c897SSalil Mehta } 34867a01c897SSalil Mehta 34877a01c897SSalil Mehta 
ret = hclgevf_init_hdev(ae_dev->priv); 3488a6d818e3SYunsheng Lin if (ret) { 34897a01c897SSalil Mehta dev_err(&pdev->dev, "hclge device initialization failed\n"); 34907a01c897SSalil Mehta return ret; 34917a01c897SSalil Mehta } 34927a01c897SSalil Mehta 3493a6d818e3SYunsheng Lin return 0; 3494a6d818e3SYunsheng Lin } 3495a6d818e3SYunsheng Lin 34967a01c897SSalil Mehta static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 34977a01c897SSalil Mehta { 34987a01c897SSalil Mehta struct hclgevf_dev *hdev = ae_dev->priv; 34997a01c897SSalil Mehta 35007a01c897SSalil Mehta hclgevf_uninit_hdev(hdev); 3501e2cb1decSSalil Mehta ae_dev->priv = NULL; 3502e2cb1decSSalil Mehta } 3503e2cb1decSSalil Mehta 3504849e4607SPeng Li static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3505849e4607SPeng Li { 3506849e4607SPeng Li struct hnae3_handle *nic = &hdev->nic; 3507849e4607SPeng Li struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3508849e4607SPeng Li 35098be73621SHuazhong Tan return min_t(u32, hdev->rss_size_max, 351035244430SJian Shen hdev->num_tqps / kinfo->tc_info.num_tc); 3511849e4607SPeng Li } 3512849e4607SPeng Li 3513849e4607SPeng Li /** 3514849e4607SPeng Li * hclgevf_get_channels - Get the current channels enabled and max supported. 3515849e4607SPeng Li * @handle: hardware information for network interface 3516849e4607SPeng Li * @ch: ethtool channels structure 3517849e4607SPeng Li * 3518849e4607SPeng Li * We don't support separate tx and rx queues as channels. The other count 3519849e4607SPeng Li * represents how many queues are being used for control. max_combined counts 3520849e4607SPeng Li * how many queue pairs we can support. They may not be mapped 1 to 1 with 3521849e4607SPeng Li * q_vectors since we support a lot more queue pairs than q_vectors. 
3522849e4607SPeng Li **/ 3523849e4607SPeng Li static void hclgevf_get_channels(struct hnae3_handle *handle, 3524849e4607SPeng Li struct ethtool_channels *ch) 3525849e4607SPeng Li { 3526849e4607SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3527849e4607SPeng Li 3528849e4607SPeng Li ch->max_combined = hclgevf_get_max_channels(hdev); 3529849e4607SPeng Li ch->other_count = 0; 3530849e4607SPeng Li ch->max_other = 0; 35318be73621SHuazhong Tan ch->combined_count = handle->kinfo.rss_size; 3532849e4607SPeng Li } 3533849e4607SPeng Li 3534cc719218SPeng Li static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 35350d43bf45SHuazhong Tan u16 *alloc_tqps, u16 *max_rss_size) 3536cc719218SPeng Li { 3537cc719218SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3538cc719218SPeng Li 35390d43bf45SHuazhong Tan *alloc_tqps = hdev->num_tqps; 3540cc719218SPeng Li *max_rss_size = hdev->rss_size_max; 3541cc719218SPeng Li } 3542cc719218SPeng Li 35434093d1a2SGuangbin Huang static void hclgevf_update_rss_size(struct hnae3_handle *handle, 35444093d1a2SGuangbin Huang u32 new_tqps_num) 35454093d1a2SGuangbin Huang { 35464093d1a2SGuangbin Huang struct hnae3_knic_private_info *kinfo = &handle->kinfo; 35474093d1a2SGuangbin Huang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 35484093d1a2SGuangbin Huang u16 max_rss_size; 35494093d1a2SGuangbin Huang 35504093d1a2SGuangbin Huang kinfo->req_rss_size = new_tqps_num; 35514093d1a2SGuangbin Huang 35524093d1a2SGuangbin Huang max_rss_size = min_t(u16, hdev->rss_size_max, 355335244430SJian Shen hdev->num_tqps / kinfo->tc_info.num_tc); 35544093d1a2SGuangbin Huang 35554093d1a2SGuangbin Huang /* Use the user's configuration when it is not larger than 35564093d1a2SGuangbin Huang * max_rss_size, otherwise, use the maximum specification value. 
35574093d1a2SGuangbin Huang */ 35584093d1a2SGuangbin Huang if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 35594093d1a2SGuangbin Huang kinfo->req_rss_size <= max_rss_size) 35604093d1a2SGuangbin Huang kinfo->rss_size = kinfo->req_rss_size; 35614093d1a2SGuangbin Huang else if (kinfo->rss_size > max_rss_size || 35624093d1a2SGuangbin Huang (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 35634093d1a2SGuangbin Huang kinfo->rss_size = max_rss_size; 35644093d1a2SGuangbin Huang 356535244430SJian Shen kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 35664093d1a2SGuangbin Huang } 35674093d1a2SGuangbin Huang 35684093d1a2SGuangbin Huang static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 35694093d1a2SGuangbin Huang bool rxfh_configured) 35704093d1a2SGuangbin Huang { 35714093d1a2SGuangbin Huang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 35724093d1a2SGuangbin Huang struct hnae3_knic_private_info *kinfo = &handle->kinfo; 35734093d1a2SGuangbin Huang u16 cur_rss_size = kinfo->rss_size; 35744093d1a2SGuangbin Huang u16 cur_tqps = kinfo->num_tqps; 35754093d1a2SGuangbin Huang u32 *rss_indir; 35764093d1a2SGuangbin Huang unsigned int i; 35774093d1a2SGuangbin Huang int ret; 35784093d1a2SGuangbin Huang 35794093d1a2SGuangbin Huang hclgevf_update_rss_size(handle, new_tqps_num); 35804093d1a2SGuangbin Huang 35814093d1a2SGuangbin Huang ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 35824093d1a2SGuangbin Huang if (ret) 35834093d1a2SGuangbin Huang return ret; 35844093d1a2SGuangbin Huang 3585cd7e963dSSalil Mehta /* RSS indirection table has been configured by user */ 35864093d1a2SGuangbin Huang if (rxfh_configured) 35874093d1a2SGuangbin Huang goto out; 35884093d1a2SGuangbin Huang 35894093d1a2SGuangbin Huang /* Reinitializes the rss indirect table according to the new RSS size */ 359087ce161eSGuangbin Huang rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 359187ce161eSGuangbin Huang sizeof(u32), 
GFP_KERNEL); 35924093d1a2SGuangbin Huang if (!rss_indir) 35934093d1a2SGuangbin Huang return -ENOMEM; 35944093d1a2SGuangbin Huang 359587ce161eSGuangbin Huang for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 35964093d1a2SGuangbin Huang rss_indir[i] = i % kinfo->rss_size; 35974093d1a2SGuangbin Huang 3598944de484SGuojia Liao hdev->rss_cfg.rss_size = kinfo->rss_size; 3599944de484SGuojia Liao 36004093d1a2SGuangbin Huang ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 36014093d1a2SGuangbin Huang if (ret) 36024093d1a2SGuangbin Huang dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 36034093d1a2SGuangbin Huang ret); 36044093d1a2SGuangbin Huang 36054093d1a2SGuangbin Huang kfree(rss_indir); 36064093d1a2SGuangbin Huang 36074093d1a2SGuangbin Huang out: 36084093d1a2SGuangbin Huang if (!ret) 36094093d1a2SGuangbin Huang dev_info(&hdev->pdev->dev, 36104093d1a2SGuangbin Huang "Channels changed, rss_size from %u to %u, tqps from %u to %u", 36114093d1a2SGuangbin Huang cur_rss_size, kinfo->rss_size, 361235244430SJian Shen cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 36134093d1a2SGuangbin Huang 36144093d1a2SGuangbin Huang return ret; 36154093d1a2SGuangbin Huang } 36164093d1a2SGuangbin Huang 3617175ec96bSFuyun Liang static int hclgevf_get_status(struct hnae3_handle *handle) 3618175ec96bSFuyun Liang { 3619175ec96bSFuyun Liang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3620175ec96bSFuyun Liang 3621175ec96bSFuyun Liang return hdev->hw.mac.link; 3622175ec96bSFuyun Liang } 3623175ec96bSFuyun Liang 36244a152de9SFuyun Liang static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 36254a152de9SFuyun Liang u8 *auto_neg, u32 *speed, 36264a152de9SFuyun Liang u8 *duplex) 36274a152de9SFuyun Liang { 36284a152de9SFuyun Liang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 36294a152de9SFuyun Liang 36304a152de9SFuyun Liang if (speed) 36314a152de9SFuyun Liang *speed = hdev->hw.mac.speed; 36324a152de9SFuyun Liang if (duplex) 
36334a152de9SFuyun Liang *duplex = hdev->hw.mac.duplex; 36344a152de9SFuyun Liang if (auto_neg) 36354a152de9SFuyun Liang *auto_neg = AUTONEG_DISABLE; 36364a152de9SFuyun Liang } 36374a152de9SFuyun Liang 36384a152de9SFuyun Liang void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 36394a152de9SFuyun Liang u8 duplex) 36404a152de9SFuyun Liang { 36414a152de9SFuyun Liang hdev->hw.mac.speed = speed; 36424a152de9SFuyun Liang hdev->hw.mac.duplex = duplex; 36434a152de9SFuyun Liang } 36444a152de9SFuyun Liang 36451731be4cSYonglong Liu static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 36465c9f6b39SPeng Li { 36475c9f6b39SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 36483462207dSYufeng Mo bool gro_en_old = hdev->gro_en; 36493462207dSYufeng Mo int ret; 36505c9f6b39SPeng Li 36513462207dSYufeng Mo hdev->gro_en = enable; 36523462207dSYufeng Mo ret = hclgevf_config_gro(hdev); 36533462207dSYufeng Mo if (ret) 36543462207dSYufeng Mo hdev->gro_en = gro_en_old; 36553462207dSYufeng Mo 36563462207dSYufeng Mo return ret; 36575c9f6b39SPeng Li } 36585c9f6b39SPeng Li 365988d10bd6SJian Shen static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 366088d10bd6SJian Shen u8 *module_type) 3661c136b884SPeng Li { 3662c136b884SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 366388d10bd6SJian Shen 3664c136b884SPeng Li if (media_type) 3665c136b884SPeng Li *media_type = hdev->hw.mac.media_type; 366688d10bd6SJian Shen 366788d10bd6SJian Shen if (module_type) 366888d10bd6SJian Shen *module_type = hdev->hw.mac.module_type; 3669c136b884SPeng Li } 3670c136b884SPeng Li 36714d60291bSHuazhong Tan static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 36724d60291bSHuazhong Tan { 36734d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 36744d60291bSHuazhong Tan 3675aa5c4f17SHuazhong Tan return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 36764d60291bSHuazhong Tan } 36774d60291bSHuazhong 
Tan 3678fe735c84SHuazhong Tan static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3679fe735c84SHuazhong Tan { 3680fe735c84SHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3681fe735c84SHuazhong Tan 3682076bb537SJie Wang return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3683fe735c84SHuazhong Tan } 3684fe735c84SHuazhong Tan 36854d60291bSHuazhong Tan static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 36864d60291bSHuazhong Tan { 36874d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 36884d60291bSHuazhong Tan 36894d60291bSHuazhong Tan return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 36904d60291bSHuazhong Tan } 36914d60291bSHuazhong Tan 36924d60291bSHuazhong Tan static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 36934d60291bSHuazhong Tan { 36944d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 36954d60291bSHuazhong Tan 3696c88a6e7dSHuazhong Tan return hdev->rst_stats.hw_rst_done_cnt; 36974d60291bSHuazhong Tan } 36984d60291bSHuazhong Tan 36999194d18bSliuzhongzhu static void hclgevf_get_link_mode(struct hnae3_handle *handle, 37009194d18bSliuzhongzhu unsigned long *supported, 37019194d18bSliuzhongzhu unsigned long *advertising) 37029194d18bSliuzhongzhu { 37039194d18bSliuzhongzhu struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 37049194d18bSliuzhongzhu 37059194d18bSliuzhongzhu *supported = hdev->hw.mac.supported; 37069194d18bSliuzhongzhu *advertising = hdev->hw.mac.advertising; 37079194d18bSliuzhongzhu } 37089194d18bSliuzhongzhu 37091600c3e5SJian Shen #define MAX_SEPARATE_NUM 4 3710e407efddSHuazhong Tan #define SEPARATOR_VALUE 0xFDFCFBFA 37111600c3e5SJian Shen #define REG_NUM_PER_LINE 4 37121600c3e5SJian Shen #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 37131600c3e5SJian Shen 37141600c3e5SJian Shen static int hclgevf_get_regs_len(struct hnae3_handle *handle) 37151600c3e5SJian Shen { 37161600c3e5SJian 
Shen int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 37171600c3e5SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 37181600c3e5SJian Shen 37191600c3e5SJian Shen cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 37201600c3e5SJian Shen common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 37211600c3e5SJian Shen ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 37221600c3e5SJian Shen tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 37231600c3e5SJian Shen 37241600c3e5SJian Shen return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 37251600c3e5SJian Shen tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 37261600c3e5SJian Shen } 37271600c3e5SJian Shen 37281600c3e5SJian Shen static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 37291600c3e5SJian Shen void *data) 37301600c3e5SJian Shen { 37311600c3e5SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 37321600c3e5SJian Shen int i, j, reg_um, separator_num; 37331600c3e5SJian Shen u32 *reg = data; 37341600c3e5SJian Shen 37351600c3e5SJian Shen *version = hdev->fw_version; 37361600c3e5SJian Shen 37371600c3e5SJian Shen /* fetching per-VF registers values from VF PCIe register space */ 37381600c3e5SJian Shen reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 37391600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 37401600c3e5SJian Shen for (i = 0; i < reg_um; i++) 37411600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 37421600c3e5SJian Shen for (i = 0; i < separator_num; i++) 37431600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 37441600c3e5SJian Shen 37451600c3e5SJian Shen reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 37461600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 37471600c3e5SJian Shen for (i = 0; i < reg_um; i++) 37481600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, 
common_reg_addr_list[i]); 37491600c3e5SJian Shen for (i = 0; i < separator_num; i++) 37501600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 37511600c3e5SJian Shen 37521600c3e5SJian Shen reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 37531600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 37541600c3e5SJian Shen for (j = 0; j < hdev->num_tqps; j++) { 37551600c3e5SJian Shen for (i = 0; i < reg_um; i++) 37561600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, 37571600c3e5SJian Shen ring_reg_addr_list[i] + 37581600c3e5SJian Shen 0x200 * j); 37591600c3e5SJian Shen for (i = 0; i < separator_num; i++) 37601600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 37611600c3e5SJian Shen } 37621600c3e5SJian Shen 37631600c3e5SJian Shen reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 37641600c3e5SJian Shen separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 37651600c3e5SJian Shen for (j = 0; j < hdev->num_msi_used - 1; j++) { 37661600c3e5SJian Shen for (i = 0; i < reg_um; i++) 37671600c3e5SJian Shen *reg++ = hclgevf_read_dev(&hdev->hw, 37681600c3e5SJian Shen tqp_intr_reg_addr_list[i] + 37691600c3e5SJian Shen 4 * j); 37701600c3e5SJian Shen for (i = 0; i < separator_num; i++) 37711600c3e5SJian Shen *reg++ = SEPARATOR_VALUE; 37721600c3e5SJian Shen } 37731600c3e5SJian Shen } 37741600c3e5SJian Shen 377592f11ea1SJian Shen void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 377692f11ea1SJian Shen u8 *port_base_vlan_info, u8 data_size) 377792f11ea1SJian Shen { 377892f11ea1SJian Shen struct hnae3_handle *nic = &hdev->nic; 3779d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 3780a6f7bfdcSJian Shen int ret; 378192f11ea1SJian Shen 378292f11ea1SJian Shen rtnl_lock(); 3783a6f7bfdcSJian Shen 3784b7b5d25bSGuojia Liao if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 3785b7b5d25bSGuojia Liao test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { 3786a6f7bfdcSJian Shen dev_warn(&hdev->pdev->dev, 3787a6f7bfdcSJian Shen "is 
resetting when updating port based vlan info\n"); 378892f11ea1SJian Shen rtnl_unlock(); 3789a6f7bfdcSJian Shen return; 3790a6f7bfdcSJian Shen } 3791a6f7bfdcSJian Shen 3792a6f7bfdcSJian Shen ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3793a6f7bfdcSJian Shen if (ret) { 3794a6f7bfdcSJian Shen rtnl_unlock(); 3795a6f7bfdcSJian Shen return; 3796a6f7bfdcSJian Shen } 379792f11ea1SJian Shen 379892f11ea1SJian Shen /* send msg to PF and wait update port based vlan info */ 3799d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3800d3410018SYufeng Mo HCLGE_MBX_PORT_BASE_VLAN_CFG); 3801d3410018SYufeng Mo memcpy(send_msg.data, port_base_vlan_info, data_size); 3802a6f7bfdcSJian Shen ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3803a6f7bfdcSJian Shen if (!ret) { 380492f11ea1SJian Shen if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3805a6f7bfdcSJian Shen nic->port_base_vlan_state = state; 380692f11ea1SJian Shen else 380792f11ea1SJian Shen nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3808a6f7bfdcSJian Shen } 380992f11ea1SJian Shen 381092f11ea1SJian Shen hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 381192f11ea1SJian Shen rtnl_unlock(); 381292f11ea1SJian Shen } 381392f11ea1SJian Shen 3814e2cb1decSSalil Mehta static const struct hnae3_ae_ops hclgevf_ops = { 3815e2cb1decSSalil Mehta .init_ae_dev = hclgevf_init_ae_dev, 3816e2cb1decSSalil Mehta .uninit_ae_dev = hclgevf_uninit_ae_dev, 3817bb1890d5SJiaran Zhang .reset_prepare = hclgevf_reset_prepare_general, 3818bb1890d5SJiaran Zhang .reset_done = hclgevf_reset_done, 3819e718a93fSPeng Li .init_client_instance = hclgevf_init_client_instance, 3820e718a93fSPeng Li .uninit_client_instance = hclgevf_uninit_client_instance, 3821e2cb1decSSalil Mehta .start = hclgevf_ae_start, 3822e2cb1decSSalil Mehta .stop = hclgevf_ae_stop, 3823a6d818e3SYunsheng Lin .client_start = hclgevf_client_start, 3824a6d818e3SYunsheng Lin .client_stop = hclgevf_client_stop, 3825e2cb1decSSalil Mehta .map_ring_to_vector 
= hclgevf_map_ring_to_vector, 3826e2cb1decSSalil Mehta .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3827e2cb1decSSalil Mehta .get_vector = hclgevf_get_vector, 38280d3e6631SYunsheng Lin .put_vector = hclgevf_put_vector, 3829e2cb1decSSalil Mehta .reset_queue = hclgevf_reset_tqp, 3830e2cb1decSSalil Mehta .get_mac_addr = hclgevf_get_mac_addr, 3831e2cb1decSSalil Mehta .set_mac_addr = hclgevf_set_mac_addr, 3832e2cb1decSSalil Mehta .add_uc_addr = hclgevf_add_uc_addr, 3833e2cb1decSSalil Mehta .rm_uc_addr = hclgevf_rm_uc_addr, 3834e2cb1decSSalil Mehta .add_mc_addr = hclgevf_add_mc_addr, 3835e2cb1decSSalil Mehta .rm_mc_addr = hclgevf_rm_mc_addr, 3836e2cb1decSSalil Mehta .get_stats = hclgevf_get_stats, 3837e2cb1decSSalil Mehta .update_stats = hclgevf_update_stats, 3838e2cb1decSSalil Mehta .get_strings = hclgevf_get_strings, 3839e2cb1decSSalil Mehta .get_sset_count = hclgevf_get_sset_count, 3840*027733b1SJie Wang .get_rss_key_size = hclge_comm_get_rss_key_size, 3841e2cb1decSSalil Mehta .get_rss = hclgevf_get_rss, 3842e2cb1decSSalil Mehta .set_rss = hclgevf_set_rss, 3843d97b3072SJian Shen .get_rss_tuple = hclgevf_get_rss_tuple, 3844d97b3072SJian Shen .set_rss_tuple = hclgevf_set_rss_tuple, 3845e2cb1decSSalil Mehta .get_tc_size = hclgevf_get_tc_size, 3846e2cb1decSSalil Mehta .get_fw_version = hclgevf_get_fw_version, 3847e2cb1decSSalil Mehta .set_vlan_filter = hclgevf_set_vlan_filter, 3848fa6a262aSJian Shen .enable_vlan_filter = hclgevf_enable_vlan_filter, 3849b2641e2aSYunsheng Lin .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 38506d4c3981SSalil Mehta .reset_event = hclgevf_reset_event, 3851720bd583SHuazhong Tan .set_default_reset_request = hclgevf_set_def_reset_request, 38524093d1a2SGuangbin Huang .set_channels = hclgevf_set_channels, 3853849e4607SPeng Li .get_channels = hclgevf_get_channels, 3854cc719218SPeng Li .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 38551600c3e5SJian Shen .get_regs_len = hclgevf_get_regs_len, 38561600c3e5SJian Shen 
.get_regs = hclgevf_get_regs, 3857175ec96bSFuyun Liang .get_status = hclgevf_get_status, 38584a152de9SFuyun Liang .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3859c136b884SPeng Li .get_media_type = hclgevf_get_media_type, 38604d60291bSHuazhong Tan .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 38614d60291bSHuazhong Tan .ae_dev_resetting = hclgevf_ae_dev_resetting, 38624d60291bSHuazhong Tan .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 38635c9f6b39SPeng Li .set_gro_en = hclgevf_gro_en, 3864818f1675SYunsheng Lin .set_mtu = hclgevf_set_mtu, 38650c29d191Sliuzhongzhu .get_global_queue_id = hclgevf_get_qid_global, 38668cdb992fSJian Shen .set_timer_task = hclgevf_set_timer_task, 38679194d18bSliuzhongzhu .get_link_mode = hclgevf_get_link_mode, 3868e196ec75SJian Shen .set_promisc_mode = hclgevf_set_promisc_mode, 3869c631c696SJian Shen .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3870fe735c84SHuazhong Tan .get_cmdq_stat = hclgevf_get_cmdq_stat, 3871e2cb1decSSalil Mehta }; 3872e2cb1decSSalil Mehta 3873e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf = { 3874e2cb1decSSalil Mehta .ops = &hclgevf_ops, 3875e2cb1decSSalil Mehta .pdev_id_table = ae_algovf_pci_tbl, 3876e2cb1decSSalil Mehta }; 3877e2cb1decSSalil Mehta 3878e2cb1decSSalil Mehta static int hclgevf_init(void) 3879e2cb1decSSalil Mehta { 3880e2cb1decSSalil Mehta pr_info("%s is initializing\n", HCLGEVF_NAME); 3881e2cb1decSSalil Mehta 3882f29da408SYufeng Mo hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); 38830ea68902SYunsheng Lin if (!hclgevf_wq) { 38840ea68902SYunsheng Lin pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 38850ea68902SYunsheng Lin return -ENOMEM; 38860ea68902SYunsheng Lin } 38870ea68902SYunsheng Lin 3888854cf33aSFuyun Liang hnae3_register_ae_algo(&ae_algovf); 3889854cf33aSFuyun Liang 3890854cf33aSFuyun Liang return 0; 3891e2cb1decSSalil Mehta } 3892e2cb1decSSalil Mehta 3893e2cb1decSSalil Mehta static void hclgevf_exit(void) 
3894e2cb1decSSalil Mehta { 3895e2cb1decSSalil Mehta hnae3_unregister_ae_algo(&ae_algovf); 38960ea68902SYunsheng Lin destroy_workqueue(hclgevf_wq); 3897e2cb1decSSalil Mehta } 3898e2cb1decSSalil Mehta module_init(hclgevf_init); 3899e2cb1decSSalil Mehta module_exit(hclgevf_exit); 3900e2cb1decSSalil Mehta 3901e2cb1decSSalil Mehta MODULE_LICENSE("GPL"); 3902e2cb1decSSalil Mehta MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3903e2cb1decSSalil Mehta MODULE_DESCRIPTION("HCLGEVF Driver"); 3904e2cb1decSSalil Mehta MODULE_VERSION(HCLGEVF_MOD_VERSION); 3905