1e2cb1decSSalil Mehta // SPDX-License-Identifier: GPL-2.0+ 2e2cb1decSSalil Mehta // Copyright (c) 2016-2017 Hisilicon Limited. 3e2cb1decSSalil Mehta 4e2cb1decSSalil Mehta #include <linux/etherdevice.h> 56988eb2aSSalil Mehta #include <net/rtnetlink.h> 6e2cb1decSSalil Mehta #include "hclgevf_cmd.h" 7e2cb1decSSalil Mehta #include "hclgevf_main.h" 8e2cb1decSSalil Mehta #include "hclge_mbx.h" 9e2cb1decSSalil Mehta #include "hnae3.h" 10e2cb1decSSalil Mehta 11e2cb1decSSalil Mehta #define HCLGEVF_NAME "hclgevf" 12e2cb1decSSalil Mehta 137a01c897SSalil Mehta static int hclgevf_init_hdev(struct hclgevf_dev *hdev); 147a01c897SSalil Mehta static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev); 15e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf; 16e2cb1decSSalil Mehta 17e2cb1decSSalil Mehta static const struct pci_device_id ae_algovf_pci_tbl[] = { 18e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 19e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 20e2cb1decSSalil Mehta /* required last entry */ 21e2cb1decSSalil Mehta {0, } 22e2cb1decSSalil Mehta }; 23e2cb1decSSalil Mehta 242f550a46SYunsheng Lin MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 252f550a46SYunsheng Lin 26e2cb1decSSalil Mehta static inline struct hclgevf_dev *hclgevf_ae_get_hdev( 27e2cb1decSSalil Mehta struct hnae3_handle *handle) 28e2cb1decSSalil Mehta { 29e2cb1decSSalil Mehta return container_of(handle, struct hclgevf_dev, nic); 30e2cb1decSSalil Mehta } 31e2cb1decSSalil Mehta 32e2cb1decSSalil Mehta static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 33e2cb1decSSalil Mehta { 34e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 35e2cb1decSSalil Mehta struct hnae3_queue *queue; 36e2cb1decSSalil Mehta struct hclgevf_desc desc; 37e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 38e2cb1decSSalil Mehta int status; 39e2cb1decSSalil Mehta int i; 40e2cb1decSSalil Mehta 41e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; 
i++) { 42e2cb1decSSalil Mehta queue = handle->kinfo.tqp[i]; 43e2cb1decSSalil Mehta tqp = container_of(queue, struct hclgevf_tqp, q); 44e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, 45e2cb1decSSalil Mehta HCLGEVF_OPC_QUERY_RX_STATUS, 46e2cb1decSSalil Mehta true); 47e2cb1decSSalil Mehta 48e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 49e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 50e2cb1decSSalil Mehta if (status) { 51e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 52e2cb1decSSalil Mehta "Query tqp stat fail, status = %d,queue = %d\n", 53e2cb1decSSalil Mehta status, i); 54e2cb1decSSalil Mehta return status; 55e2cb1decSSalil Mehta } 56e2cb1decSSalil Mehta tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 57cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 58e2cb1decSSalil Mehta 59e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 60e2cb1decSSalil Mehta true); 61e2cb1decSSalil Mehta 62e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 63e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 64e2cb1decSSalil Mehta if (status) { 65e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 66e2cb1decSSalil Mehta "Query tqp stat fail, status = %d,queue = %d\n", 67e2cb1decSSalil Mehta status, i); 68e2cb1decSSalil Mehta return status; 69e2cb1decSSalil Mehta } 70e2cb1decSSalil Mehta tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 71cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 72e2cb1decSSalil Mehta } 73e2cb1decSSalil Mehta 74e2cb1decSSalil Mehta return 0; 75e2cb1decSSalil Mehta } 76e2cb1decSSalil Mehta 77e2cb1decSSalil Mehta static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 78e2cb1decSSalil Mehta { 79e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo = &handle->kinfo; 80e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 81e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 82e2cb1decSSalil Mehta u64 *buff = data; 83e2cb1decSSalil 
Mehta int i; 84e2cb1decSSalil Mehta 85e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; i++) { 86e2cb1decSSalil Mehta tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); 87e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 88e2cb1decSSalil Mehta } 89e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 90e2cb1decSSalil Mehta tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); 91e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 92e2cb1decSSalil Mehta } 93e2cb1decSSalil Mehta 94e2cb1decSSalil Mehta return buff; 95e2cb1decSSalil Mehta } 96e2cb1decSSalil Mehta 97e2cb1decSSalil Mehta static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 98e2cb1decSSalil Mehta { 99e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 100e2cb1decSSalil Mehta 101e2cb1decSSalil Mehta return hdev->num_tqps * 2; 102e2cb1decSSalil Mehta } 103e2cb1decSSalil Mehta 104e2cb1decSSalil Mehta static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 105e2cb1decSSalil Mehta { 106e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 107e2cb1decSSalil Mehta u8 *buff = data; 108e2cb1decSSalil Mehta int i = 0; 109e2cb1decSSalil Mehta 110e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; i++) { 111e2cb1decSSalil Mehta struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], 112e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1130c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 114e2cb1decSSalil Mehta tqp->index); 115e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 116e2cb1decSSalil Mehta } 117e2cb1decSSalil Mehta 118e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; i++) { 119e2cb1decSSalil Mehta struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], 120e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1210c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 122e2cb1decSSalil Mehta tqp->index); 
123e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 124e2cb1decSSalil Mehta } 125e2cb1decSSalil Mehta 126e2cb1decSSalil Mehta return buff; 127e2cb1decSSalil Mehta } 128e2cb1decSSalil Mehta 129e2cb1decSSalil Mehta static void hclgevf_update_stats(struct hnae3_handle *handle, 130e2cb1decSSalil Mehta struct net_device_stats *net_stats) 131e2cb1decSSalil Mehta { 132e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 133e2cb1decSSalil Mehta int status; 134e2cb1decSSalil Mehta 135e2cb1decSSalil Mehta status = hclgevf_tqps_update_stats(handle); 136e2cb1decSSalil Mehta if (status) 137e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 138e2cb1decSSalil Mehta "VF update of TQPS stats fail, status = %d.\n", 139e2cb1decSSalil Mehta status); 140e2cb1decSSalil Mehta } 141e2cb1decSSalil Mehta 142e2cb1decSSalil Mehta static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 143e2cb1decSSalil Mehta { 144e2cb1decSSalil Mehta if (strset == ETH_SS_TEST) 145e2cb1decSSalil Mehta return -EOPNOTSUPP; 146e2cb1decSSalil Mehta else if (strset == ETH_SS_STATS) 147e2cb1decSSalil Mehta return hclgevf_tqps_get_sset_count(handle, strset); 148e2cb1decSSalil Mehta 149e2cb1decSSalil Mehta return 0; 150e2cb1decSSalil Mehta } 151e2cb1decSSalil Mehta 152e2cb1decSSalil Mehta static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 153e2cb1decSSalil Mehta u8 *data) 154e2cb1decSSalil Mehta { 155e2cb1decSSalil Mehta u8 *p = (char *)data; 156e2cb1decSSalil Mehta 157e2cb1decSSalil Mehta if (strset == ETH_SS_STATS) 158e2cb1decSSalil Mehta p = hclgevf_tqps_get_strings(handle, p); 159e2cb1decSSalil Mehta } 160e2cb1decSSalil Mehta 161e2cb1decSSalil Mehta static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 162e2cb1decSSalil Mehta { 163e2cb1decSSalil Mehta hclgevf_tqps_get_stats(handle, data); 164e2cb1decSSalil Mehta } 165e2cb1decSSalil Mehta 166e2cb1decSSalil Mehta static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 167e2cb1decSSalil Mehta 
{ 168e2cb1decSSalil Mehta u8 resp_msg; 169e2cb1decSSalil Mehta int status; 170e2cb1decSSalil Mehta 171e2cb1decSSalil Mehta status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 172e2cb1decSSalil Mehta true, &resp_msg, sizeof(u8)); 173e2cb1decSSalil Mehta if (status) { 174e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 175e2cb1decSSalil Mehta "VF request to get TC info from PF failed %d", 176e2cb1decSSalil Mehta status); 177e2cb1decSSalil Mehta return status; 178e2cb1decSSalil Mehta } 179e2cb1decSSalil Mehta 180e2cb1decSSalil Mehta hdev->hw_tc_map = resp_msg; 181e2cb1decSSalil Mehta 182e2cb1decSSalil Mehta return 0; 183e2cb1decSSalil Mehta } 184e2cb1decSSalil Mehta 185e2cb1decSSalil Mehta static int hclge_get_queue_info(struct hclgevf_dev *hdev) 186e2cb1decSSalil Mehta { 187e2cb1decSSalil Mehta #define HCLGEVF_TQPS_RSS_INFO_LEN 8 188e2cb1decSSalil Mehta u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 189e2cb1decSSalil Mehta int status; 190e2cb1decSSalil Mehta 191e2cb1decSSalil Mehta status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 192e2cb1decSSalil Mehta true, resp_msg, 193e2cb1decSSalil Mehta HCLGEVF_TQPS_RSS_INFO_LEN); 194e2cb1decSSalil Mehta if (status) { 195e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 196e2cb1decSSalil Mehta "VF request to get tqp info from PF failed %d", 197e2cb1decSSalil Mehta status); 198e2cb1decSSalil Mehta return status; 199e2cb1decSSalil Mehta } 200e2cb1decSSalil Mehta 201e2cb1decSSalil Mehta memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 202e2cb1decSSalil Mehta memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 203e2cb1decSSalil Mehta memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); 204e2cb1decSSalil Mehta memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); 205e2cb1decSSalil Mehta 206e2cb1decSSalil Mehta return 0; 207e2cb1decSSalil Mehta } 208e2cb1decSSalil Mehta 209e2cb1decSSalil Mehta static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 210e2cb1decSSalil Mehta { 211e2cb1decSSalil Mehta 
struct hclgevf_tqp *tqp; 212e2cb1decSSalil Mehta int i; 213e2cb1decSSalil Mehta 2147a01c897SSalil Mehta /* if this is on going reset then we need to re-allocate the TPQs 2157a01c897SSalil Mehta * since we cannot assume we would get same number of TPQs back from PF 2167a01c897SSalil Mehta */ 2177a01c897SSalil Mehta if (hclgevf_dev_ongoing_reset(hdev)) 2187a01c897SSalil Mehta devm_kfree(&hdev->pdev->dev, hdev->htqp); 2197a01c897SSalil Mehta 220e2cb1decSSalil Mehta hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 221e2cb1decSSalil Mehta sizeof(struct hclgevf_tqp), GFP_KERNEL); 222e2cb1decSSalil Mehta if (!hdev->htqp) 223e2cb1decSSalil Mehta return -ENOMEM; 224e2cb1decSSalil Mehta 225e2cb1decSSalil Mehta tqp = hdev->htqp; 226e2cb1decSSalil Mehta 227e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; i++) { 228e2cb1decSSalil Mehta tqp->dev = &hdev->pdev->dev; 229e2cb1decSSalil Mehta tqp->index = i; 230e2cb1decSSalil Mehta 231e2cb1decSSalil Mehta tqp->q.ae_algo = &ae_algovf; 232e2cb1decSSalil Mehta tqp->q.buf_size = hdev->rx_buf_len; 233e2cb1decSSalil Mehta tqp->q.desc_num = hdev->num_desc; 234e2cb1decSSalil Mehta tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 235e2cb1decSSalil Mehta i * HCLGEVF_TQP_REG_SIZE; 236e2cb1decSSalil Mehta 237e2cb1decSSalil Mehta tqp++; 238e2cb1decSSalil Mehta } 239e2cb1decSSalil Mehta 240e2cb1decSSalil Mehta return 0; 241e2cb1decSSalil Mehta } 242e2cb1decSSalil Mehta 243e2cb1decSSalil Mehta static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 244e2cb1decSSalil Mehta { 245e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 246e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo; 247e2cb1decSSalil Mehta u16 new_tqps = hdev->num_tqps; 248e2cb1decSSalil Mehta int i; 249e2cb1decSSalil Mehta 250e2cb1decSSalil Mehta kinfo = &nic->kinfo; 251e2cb1decSSalil Mehta kinfo->num_tc = 0; 252e2cb1decSSalil Mehta kinfo->num_desc = hdev->num_desc; 253e2cb1decSSalil Mehta kinfo->rx_buf_len = hdev->rx_buf_len; 
254e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 255e2cb1decSSalil Mehta if (hdev->hw_tc_map & BIT(i)) 256e2cb1decSSalil Mehta kinfo->num_tc++; 257e2cb1decSSalil Mehta 258e2cb1decSSalil Mehta kinfo->rss_size 259e2cb1decSSalil Mehta = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 260e2cb1decSSalil Mehta new_tqps = kinfo->rss_size * kinfo->num_tc; 261e2cb1decSSalil Mehta kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 262e2cb1decSSalil Mehta 2637a01c897SSalil Mehta /* if this is on going reset then we need to re-allocate the hnae queues 2647a01c897SSalil Mehta * as well since number of TPQs from PF might have changed. 2657a01c897SSalil Mehta */ 2667a01c897SSalil Mehta if (hclgevf_dev_ongoing_reset(hdev)) 2677a01c897SSalil Mehta devm_kfree(&hdev->pdev->dev, kinfo->tqp); 2687a01c897SSalil Mehta 269e2cb1decSSalil Mehta kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 270e2cb1decSSalil Mehta sizeof(struct hnae3_queue *), GFP_KERNEL); 271e2cb1decSSalil Mehta if (!kinfo->tqp) 272e2cb1decSSalil Mehta return -ENOMEM; 273e2cb1decSSalil Mehta 274e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 275e2cb1decSSalil Mehta hdev->htqp[i].q.handle = &hdev->nic; 276e2cb1decSSalil Mehta hdev->htqp[i].q.tqp_index = i; 277e2cb1decSSalil Mehta kinfo->tqp[i] = &hdev->htqp[i].q; 278e2cb1decSSalil Mehta } 279e2cb1decSSalil Mehta 280e2cb1decSSalil Mehta return 0; 281e2cb1decSSalil Mehta } 282e2cb1decSSalil Mehta 283e2cb1decSSalil Mehta static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 284e2cb1decSSalil Mehta { 285e2cb1decSSalil Mehta int status; 286e2cb1decSSalil Mehta u8 resp_msg; 287e2cb1decSSalil Mehta 288e2cb1decSSalil Mehta status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 289e2cb1decSSalil Mehta 0, false, &resp_msg, sizeof(u8)); 290e2cb1decSSalil Mehta if (status) 291e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 292e2cb1decSSalil Mehta "VF failed to fetch link status(%d) from PF", status); 
293e2cb1decSSalil Mehta } 294e2cb1decSSalil Mehta 295e2cb1decSSalil Mehta void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 296e2cb1decSSalil Mehta { 297e2cb1decSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 298e2cb1decSSalil Mehta struct hnae3_client *client; 299e2cb1decSSalil Mehta 300e2cb1decSSalil Mehta client = handle->client; 301e2cb1decSSalil Mehta 302582d37bbSPeng Li link_state = 303582d37bbSPeng Li test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 304582d37bbSPeng Li 305e2cb1decSSalil Mehta if (link_state != hdev->hw.mac.link) { 306e2cb1decSSalil Mehta client->ops->link_status_change(handle, !!link_state); 307e2cb1decSSalil Mehta hdev->hw.mac.link = link_state; 308e2cb1decSSalil Mehta } 309e2cb1decSSalil Mehta } 310e2cb1decSSalil Mehta 311e2cb1decSSalil Mehta static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 312e2cb1decSSalil Mehta { 313e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 314e2cb1decSSalil Mehta int ret; 315e2cb1decSSalil Mehta 316e2cb1decSSalil Mehta nic->ae_algo = &ae_algovf; 317e2cb1decSSalil Mehta nic->pdev = hdev->pdev; 318e2cb1decSSalil Mehta nic->numa_node_mask = hdev->numa_node_mask; 319424eb834SSalil Mehta nic->flags |= HNAE3_SUPPORT_VF; 320e2cb1decSSalil Mehta 321e2cb1decSSalil Mehta if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { 322e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "unsupported device type %d\n", 323e2cb1decSSalil Mehta hdev->ae_dev->dev_type); 324e2cb1decSSalil Mehta return -EINVAL; 325e2cb1decSSalil Mehta } 326e2cb1decSSalil Mehta 327e2cb1decSSalil Mehta ret = hclgevf_knic_setup(hdev); 328e2cb1decSSalil Mehta if (ret) 329e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 330e2cb1decSSalil Mehta ret); 331e2cb1decSSalil Mehta return ret; 332e2cb1decSSalil Mehta } 333e2cb1decSSalil Mehta 334e2cb1decSSalil Mehta static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 335e2cb1decSSalil Mehta { 33636cbbdf6SPeng Li if 
(hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 33736cbbdf6SPeng Li dev_warn(&hdev->pdev->dev, 33836cbbdf6SPeng Li "vector(vector_id %d) has been freed.\n", vector_id); 33936cbbdf6SPeng Li return; 34036cbbdf6SPeng Li } 34136cbbdf6SPeng Li 342e2cb1decSSalil Mehta hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 343e2cb1decSSalil Mehta hdev->num_msi_left += 1; 344e2cb1decSSalil Mehta hdev->num_msi_used -= 1; 345e2cb1decSSalil Mehta } 346e2cb1decSSalil Mehta 347e2cb1decSSalil Mehta static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 348e2cb1decSSalil Mehta struct hnae3_vector_info *vector_info) 349e2cb1decSSalil Mehta { 350e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 351e2cb1decSSalil Mehta struct hnae3_vector_info *vector = vector_info; 352e2cb1decSSalil Mehta int alloc = 0; 353e2cb1decSSalil Mehta int i, j; 354e2cb1decSSalil Mehta 355e2cb1decSSalil Mehta vector_num = min(hdev->num_msi_left, vector_num); 356e2cb1decSSalil Mehta 357e2cb1decSSalil Mehta for (j = 0; j < vector_num; j++) { 358e2cb1decSSalil Mehta for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 359e2cb1decSSalil Mehta if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 360e2cb1decSSalil Mehta vector->vector = pci_irq_vector(hdev->pdev, i); 361e2cb1decSSalil Mehta vector->io_addr = hdev->hw.io_base + 362e2cb1decSSalil Mehta HCLGEVF_VECTOR_REG_BASE + 363e2cb1decSSalil Mehta (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 364e2cb1decSSalil Mehta hdev->vector_status[i] = 0; 365e2cb1decSSalil Mehta hdev->vector_irq[i] = vector->vector; 366e2cb1decSSalil Mehta 367e2cb1decSSalil Mehta vector++; 368e2cb1decSSalil Mehta alloc++; 369e2cb1decSSalil Mehta 370e2cb1decSSalil Mehta break; 371e2cb1decSSalil Mehta } 372e2cb1decSSalil Mehta } 373e2cb1decSSalil Mehta } 374e2cb1decSSalil Mehta hdev->num_msi_left -= alloc; 375e2cb1decSSalil Mehta hdev->num_msi_used += alloc; 376e2cb1decSSalil Mehta 377e2cb1decSSalil Mehta return alloc; 
378e2cb1decSSalil Mehta } 379e2cb1decSSalil Mehta 380e2cb1decSSalil Mehta static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 381e2cb1decSSalil Mehta { 382e2cb1decSSalil Mehta int i; 383e2cb1decSSalil Mehta 384e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++) 385e2cb1decSSalil Mehta if (vector == hdev->vector_irq[i]) 386e2cb1decSSalil Mehta return i; 387e2cb1decSSalil Mehta 388e2cb1decSSalil Mehta return -EINVAL; 389e2cb1decSSalil Mehta } 390e2cb1decSSalil Mehta 391e2cb1decSSalil Mehta static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 392e2cb1decSSalil Mehta { 393e2cb1decSSalil Mehta return HCLGEVF_RSS_KEY_SIZE; 394e2cb1decSSalil Mehta } 395e2cb1decSSalil Mehta 396e2cb1decSSalil Mehta static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 397e2cb1decSSalil Mehta { 398e2cb1decSSalil Mehta return HCLGEVF_RSS_IND_TBL_SIZE; 399e2cb1decSSalil Mehta } 400e2cb1decSSalil Mehta 401e2cb1decSSalil Mehta static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 402e2cb1decSSalil Mehta { 403e2cb1decSSalil Mehta const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 404e2cb1decSSalil Mehta struct hclgevf_rss_indirection_table_cmd *req; 405e2cb1decSSalil Mehta struct hclgevf_desc desc; 406e2cb1decSSalil Mehta int status; 407e2cb1decSSalil Mehta int i, j; 408e2cb1decSSalil Mehta 409e2cb1decSSalil Mehta req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 410e2cb1decSSalil Mehta 411e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 412e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 413e2cb1decSSalil Mehta false); 414e2cb1decSSalil Mehta req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 415e2cb1decSSalil Mehta req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 416e2cb1decSSalil Mehta for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 417e2cb1decSSalil Mehta req->rss_result[j] = 418e2cb1decSSalil Mehta indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 419e2cb1decSSalil 
Mehta 420e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 421e2cb1decSSalil Mehta if (status) { 422e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 423e2cb1decSSalil Mehta "VF failed(=%d) to set RSS indirection table\n", 424e2cb1decSSalil Mehta status); 425e2cb1decSSalil Mehta return status; 426e2cb1decSSalil Mehta } 427e2cb1decSSalil Mehta } 428e2cb1decSSalil Mehta 429e2cb1decSSalil Mehta return 0; 430e2cb1decSSalil Mehta } 431e2cb1decSSalil Mehta 432e2cb1decSSalil Mehta static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 433e2cb1decSSalil Mehta { 434e2cb1decSSalil Mehta struct hclgevf_rss_tc_mode_cmd *req; 435e2cb1decSSalil Mehta u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 436e2cb1decSSalil Mehta u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 437e2cb1decSSalil Mehta u16 tc_size[HCLGEVF_MAX_TC_NUM]; 438e2cb1decSSalil Mehta struct hclgevf_desc desc; 439e2cb1decSSalil Mehta u16 roundup_size; 440e2cb1decSSalil Mehta int status; 441e2cb1decSSalil Mehta int i; 442e2cb1decSSalil Mehta 443e2cb1decSSalil Mehta req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 444e2cb1decSSalil Mehta 445e2cb1decSSalil Mehta roundup_size = roundup_pow_of_two(rss_size); 446e2cb1decSSalil Mehta roundup_size = ilog2(roundup_size); 447e2cb1decSSalil Mehta 448e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 449e2cb1decSSalil Mehta tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 450e2cb1decSSalil Mehta tc_size[i] = roundup_size; 451e2cb1decSSalil Mehta tc_offset[i] = rss_size * i; 452e2cb1decSSalil Mehta } 453e2cb1decSSalil Mehta 454e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 455e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 456e4e87715SPeng Li hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 457e2cb1decSSalil Mehta (tc_valid[i] & 0x1)); 458e4e87715SPeng Li hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 459e2cb1decSSalil Mehta HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 460e4e87715SPeng Li 
hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 461e2cb1decSSalil Mehta HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 462e2cb1decSSalil Mehta } 463e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 464e2cb1decSSalil Mehta if (status) 465e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 466e2cb1decSSalil Mehta "VF failed(=%d) to set rss tc mode\n", status); 467e2cb1decSSalil Mehta 468e2cb1decSSalil Mehta return status; 469e2cb1decSSalil Mehta } 470e2cb1decSSalil Mehta 471e2cb1decSSalil Mehta static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash, 472e2cb1decSSalil Mehta u8 *key) 473e2cb1decSSalil Mehta { 474e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 475e2cb1decSSalil Mehta struct hclgevf_rss_config_cmd *req; 476e2cb1decSSalil Mehta int lkup_times = key ? 3 : 1; 477e2cb1decSSalil Mehta struct hclgevf_desc desc; 478e2cb1decSSalil Mehta int key_offset; 479e2cb1decSSalil Mehta int key_size; 480e2cb1decSSalil Mehta int status; 481e2cb1decSSalil Mehta 482e2cb1decSSalil Mehta req = (struct hclgevf_rss_config_cmd *)desc.data; 483e2cb1decSSalil Mehta lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 
1 : 0); 484e2cb1decSSalil Mehta 485e2cb1decSSalil Mehta for (key_offset = 0; key_offset < lkup_times; key_offset++) { 486e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, 487e2cb1decSSalil Mehta HCLGEVF_OPC_RSS_GENERIC_CONFIG, 488e2cb1decSSalil Mehta true); 489e2cb1decSSalil Mehta req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET); 490e2cb1decSSalil Mehta 491e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 492e2cb1decSSalil Mehta if (status) { 493e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 494e2cb1decSSalil Mehta "failed to get hardware RSS cfg, status = %d\n", 495e2cb1decSSalil Mehta status); 496e2cb1decSSalil Mehta return status; 497e2cb1decSSalil Mehta } 498e2cb1decSSalil Mehta 499e2cb1decSSalil Mehta if (key_offset == 2) 500e2cb1decSSalil Mehta key_size = 501e2cb1decSSalil Mehta HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; 502e2cb1decSSalil Mehta else 503e2cb1decSSalil Mehta key_size = HCLGEVF_RSS_HASH_KEY_NUM; 504e2cb1decSSalil Mehta 505e2cb1decSSalil Mehta if (key) 506e2cb1decSSalil Mehta memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, 507e2cb1decSSalil Mehta req->hash_key, 508e2cb1decSSalil Mehta key_size); 509e2cb1decSSalil Mehta } 510e2cb1decSSalil Mehta 511e2cb1decSSalil Mehta if (hash) { 512e2cb1decSSalil Mehta if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ) 513e2cb1decSSalil Mehta *hash = ETH_RSS_HASH_TOP; 514e2cb1decSSalil Mehta else 515e2cb1decSSalil Mehta *hash = ETH_RSS_HASH_UNKNOWN; 516e2cb1decSSalil Mehta } 517e2cb1decSSalil Mehta 518e2cb1decSSalil Mehta return 0; 519e2cb1decSSalil Mehta } 520e2cb1decSSalil Mehta 521e2cb1decSSalil Mehta static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 522e2cb1decSSalil Mehta u8 *hfunc) 523e2cb1decSSalil Mehta { 524e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 525e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 526e2cb1decSSalil Mehta int i; 527e2cb1decSSalil Mehta 
528e2cb1decSSalil Mehta if (indir) 529e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 530e2cb1decSSalil Mehta indir[i] = rss_cfg->rss_indirection_tbl[i]; 531e2cb1decSSalil Mehta 532e2cb1decSSalil Mehta return hclgevf_get_rss_hw_cfg(handle, hfunc, key); 533e2cb1decSSalil Mehta } 534e2cb1decSSalil Mehta 535e2cb1decSSalil Mehta static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 536e2cb1decSSalil Mehta const u8 *key, const u8 hfunc) 537e2cb1decSSalil Mehta { 538e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 539e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 540e2cb1decSSalil Mehta int i; 541e2cb1decSSalil Mehta 542e2cb1decSSalil Mehta /* update the shadow RSS table with user specified qids */ 543e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 544e2cb1decSSalil Mehta rss_cfg->rss_indirection_tbl[i] = indir[i]; 545e2cb1decSSalil Mehta 546e2cb1decSSalil Mehta /* update the hardware */ 547e2cb1decSSalil Mehta return hclgevf_set_rss_indir_table(hdev); 548e2cb1decSSalil Mehta } 549e2cb1decSSalil Mehta 550e2cb1decSSalil Mehta static int hclgevf_get_tc_size(struct hnae3_handle *handle) 551e2cb1decSSalil Mehta { 552e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 553e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 554e2cb1decSSalil Mehta 555e2cb1decSSalil Mehta return rss_cfg->rss_size; 556e2cb1decSSalil Mehta } 557e2cb1decSSalil Mehta 558e2cb1decSSalil Mehta static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 559b204bc74SPeng Li int vector_id, 560e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 561e2cb1decSSalil Mehta { 562e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 563e2cb1decSSalil Mehta struct hnae3_ring_chain_node *node; 564e2cb1decSSalil Mehta struct hclge_mbx_vf_to_pf_cmd *req; 565e2cb1decSSalil Mehta struct hclgevf_desc desc; 
566b204bc74SPeng Li int i = 0; 567e2cb1decSSalil Mehta int status; 568e2cb1decSSalil Mehta u8 type; 569e2cb1decSSalil Mehta 570e2cb1decSSalil Mehta req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 571e2cb1decSSalil Mehta 572e2cb1decSSalil Mehta for (node = ring_chain; node; node = node->next) { 5735d02a58dSYunsheng Lin int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 5745d02a58dSYunsheng Lin HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 5755d02a58dSYunsheng Lin 5765d02a58dSYunsheng Lin if (i == 0) { 5775d02a58dSYunsheng Lin hclgevf_cmd_setup_basic_desc(&desc, 5785d02a58dSYunsheng Lin HCLGEVF_OPC_MBX_VF_TO_PF, 5795d02a58dSYunsheng Lin false); 5805d02a58dSYunsheng Lin type = en ? 5815d02a58dSYunsheng Lin HCLGE_MBX_MAP_RING_TO_VECTOR : 5825d02a58dSYunsheng Lin HCLGE_MBX_UNMAP_RING_TO_VECTOR; 5835d02a58dSYunsheng Lin req->msg[0] = type; 5845d02a58dSYunsheng Lin req->msg[1] = vector_id; 5855d02a58dSYunsheng Lin } 5865d02a58dSYunsheng Lin 5875d02a58dSYunsheng Lin req->msg[idx_offset] = 588e4e87715SPeng Li hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 5895d02a58dSYunsheng Lin req->msg[idx_offset + 1] = node->tqp_index; 590e4e87715SPeng Li req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, 59179eee410SFuyun Liang HNAE3_RING_GL_IDX_M, 59279eee410SFuyun Liang HNAE3_RING_GL_IDX_S); 59379eee410SFuyun Liang 5945d02a58dSYunsheng Lin i++; 5955d02a58dSYunsheng Lin if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 5965d02a58dSYunsheng Lin HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 5975d02a58dSYunsheng Lin HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 5985d02a58dSYunsheng Lin !node->next) { 599e2cb1decSSalil Mehta req->msg[2] = i; 600e2cb1decSSalil Mehta 601e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 602e2cb1decSSalil Mehta if (status) { 603e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 604e2cb1decSSalil Mehta "Map TQP fail, status is %d.\n", 605e2cb1decSSalil Mehta status); 606e2cb1decSSalil Mehta return status; 607e2cb1decSSalil Mehta } 608e2cb1decSSalil Mehta i = 0; 
609e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, 610e2cb1decSSalil Mehta HCLGEVF_OPC_MBX_VF_TO_PF, 611e2cb1decSSalil Mehta false); 612e2cb1decSSalil Mehta req->msg[0] = type; 613e2cb1decSSalil Mehta req->msg[1] = vector_id; 614e2cb1decSSalil Mehta } 615e2cb1decSSalil Mehta } 616e2cb1decSSalil Mehta 617e2cb1decSSalil Mehta return 0; 618e2cb1decSSalil Mehta } 619e2cb1decSSalil Mehta 620e2cb1decSSalil Mehta static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 621e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 622e2cb1decSSalil Mehta { 623b204bc74SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 624b204bc74SPeng Li int vector_id; 625b204bc74SPeng Li 626b204bc74SPeng Li vector_id = hclgevf_get_vector_index(hdev, vector); 627b204bc74SPeng Li if (vector_id < 0) { 628b204bc74SPeng Li dev_err(&handle->pdev->dev, 629b204bc74SPeng Li "Get vector index fail. ret =%d\n", vector_id); 630b204bc74SPeng Li return vector_id; 631b204bc74SPeng Li } 632b204bc74SPeng Li 633b204bc74SPeng Li return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 634e2cb1decSSalil Mehta } 635e2cb1decSSalil Mehta 636e2cb1decSSalil Mehta static int hclgevf_unmap_ring_from_vector( 637e2cb1decSSalil Mehta struct hnae3_handle *handle, 638e2cb1decSSalil Mehta int vector, 639e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 640e2cb1decSSalil Mehta { 641e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 642e2cb1decSSalil Mehta int ret, vector_id; 643e2cb1decSSalil Mehta 644e2cb1decSSalil Mehta vector_id = hclgevf_get_vector_index(hdev, vector); 645e2cb1decSSalil Mehta if (vector_id < 0) { 646e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 647e2cb1decSSalil Mehta "Get vector index fail. 
ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

/* Release the MSI-X vector previously handed to the stack: translate the
 * stack's vector number back to the driver's internal vector id, then mark
 * it free again. Returns 0 or a negative error from the index lookup.
 */
static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

/* Ask the PF (via the mailbox command queue) to set this VF's unicast and
 * multicast promiscuous modes. msg[1]/msg[2] carry the on/off flags.
 */
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

/* ae_ops entry point: thin wrapper around hclgevf_cmd_set_promisc_mode.
 * NOTE(review): the command status is discarded here - the ops callback
 * returns void, so a mailbox failure is only visible in the log.
 */
static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				     bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

/* Enable or disable a single TQP (task queue pair) in hardware through the
 * common TQP-queue configuration command.
 */
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

/* Map a generic hnae3 queue back to its hclgevf TQP and return its index. */
static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

/* Zero the software TX/RX packet counters of every TQP on this handle. */
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

/* Query the PF for the multicast-table-address (MTA) hash selection type and
 * cache it in hdev->mta_mac_sel_type; it later determines how MAC addresses
 * are hashed into the MTA bitmap (see hclgevf_get_mac_addr_to_mta_index).
 */
static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
{
	u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				   HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
				   NULL, 0, true, &resp_msg, sizeof(u8));

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read mta type fail, ret=%d.\n", ret);
		return ret;
	}

	if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
		dev_err(&hdev->pdev->dev,
			"Read mta type invalid, resp=%d.\n", resp_msg);
		return -EINVAL;
	}

	hdev->mta_mac_sel_type = resp_msg;

	return 0;
}

/* Hash a MAC address into its MTA table index: take the two high-order
 * octets, shift right by an amount derived from the PF-reported MTA type,
 * and keep the low 12 bits.
 */
static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
					     const u8 *addr)
{
	u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
	u16 high_val = addr[1] | (addr[0] << 8);

	return (high_val >> rsh) & 0xfff;
}

/* Push the whole MTA status bitmap to the PF, sliced into fixed-size mailbox
 * messages. Message layout: msg[0] bits 0-6 = message index, bit 7 = "last
 * message" flag; msg[1..] = packed status bits.
 */
static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
					unsigned long *status)
{
#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
#define HCLGEVF_MTA_STATUS_MSG_BITS \
	(HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
	(HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
	u16 tbl_cnt;
	u16 tbl_idx;
	u8 msg_cnt;
	u8 msg_idx;
	int ret;

	msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
			       HCLGEVF_MTA_STATUS_MSG_BITS);
	tbl_idx = 0;
	msg_idx = 0;
	while (msg_cnt--) {
		u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
		u8 *p = &msg[1];
		u8 msg_ofs;
		u8 msg_bit;

		memset(msg, 0, sizeof(msg));

		/* set index field */
		msg[0] = 0x7F & msg_idx;

		/* set end flag field: the final message carries only the
		 * remainder of the table
		 */
		if (msg_cnt == 0) {
			msg[0] |= 0x80;
			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
		} else {
			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
		}

		/* set status field: pack tbl_cnt bits of the bitmap into the
		 * payload bytes, LSB first
		 */
		msg_ofs = 0;
		msg_bit = 0;
		while (tbl_cnt--) {
			if (test_bit(tbl_idx, status))
				p[msg_ofs] |= BIT(msg_bit);

			tbl_idx++;

			msg_bit++;
			if (msg_bit == BITS_PER_BYTE) {
				msg_bit = 0;
				msg_ofs++;
			}
		}

		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
					   HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
					   msg, sizeof(msg), false, NULL, 0);
		if (ret)
			break;

		msg_idx++;
	}

	return ret;
}

/* Rebuild the MTA bitmap from the netdev's current multicast address list
 * and hand the result to the PF.
 */
static int hclgevf_update_mta_status(struct hnae3_handle *handle)
{
	unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct net_device *netdev = hdev->nic.kinfo.netdev;
	struct netdev_hw_addr *ha;
	u16 tbl_idx;

	/* clear status */
	memset(mta_status, 0, sizeof(mta_status));

	/* update status from mc addr list */
	netdev_for_each_mc_addr(ha, netdev) {
		tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
		set_bit(tbl_idx, mta_status);
	}

	return hclgevf_do_update_mta_status(hdev, mta_status);
}

/* Copy the VF's cached MAC address into @p. */
static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

/* Change the VF MAC address via the PF. The mailbox payload carries
 * new-then-old address; @is_first selects ADD (no previous address to
 * remove) versus MODIFY. The local cache is updated only on success.
 */
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

/* Ask the PF to add a unicast MAC filter entry for this VF. */
static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

/* Ask the PF to remove a unicast MAC filter entry for this VF. */
static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

/* Ask the PF to add a multicast MAC filter entry for this VF. */
static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

/* Ask the PF to remove a multicast MAC filter entry for this VF. */
static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

/* Add or kill (@is_kill) a VLAN filter for this VF through the PF.
 * Payload layout: byte 0 = kill flag, bytes 1-2 = vlan id (host order,
 * copied raw), bytes 3-4 = protocol (big endian, copied raw).
 * Only 802.1Q and vlan ids 0..4095 are accepted.
 */
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

/* Enable/disable hardware VLAN tag stripping on receive, via the PF. */
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

/* Reset one queue: disable it locally first, then request the actual queue
 * reset from the PF. If the local disable fails the PF is not contacted.
 */
static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return;

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
			     2, true, NULL, 0);
}

/* Forward a reset notification to the registered nic client, if it
 * implements the reset_notify callback.
 */
static int
hclgevf_notify_client(struct hclgevf_dev *hdev,
		      enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	return client->ops->reset_notify(handle, type);
}

/* Poll the function-reset-in-progress register until hardware reports the
 * reset complete (up to 20 x 500ms), then sleep a further 5s to let the
 * rest of the stack settle. Returns -EBUSY on timeout.
 */
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS 500
#define HCLGEVF_RESET_WAIT_CNT 20
	u32 val, cnt = 0;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
	while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
		msleep(HCLGEVF_RESET_WAIT_MS);
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
		cnt++;
	}

	/* hardware completion status should be available by this time */
	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "could'nt get reset done status from h/w, timeout!\n");
		return -EBUSY;
	}

	/* we will wait a bit more to let reset of the stack to complete. This
	 * might happen in case reset assertion was made by PF. Yes, this also
	 * means we might end up waiting bit more even for VF reset.
	 */
	msleep(5000);

	return 0;
}

/* Tear down and re-create the software stack after a hardware reset:
 * uninit the nic client, re-init the hclgevf device, then re-init the
 * client. A failed device re-init leaves the VF disabled.
 */
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

	/* re-initialize the hclge device */
	ret = hclgevf_init_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

	return 0;
}

/* Full VF reset sequence, run under rtnl_lock: bring the nic down, wait for
 * hardware reset completion, rebuild the stack, bring the nic back up.
 * On a reset-wait failure the client is uninitialized and the VF is left
 * disabled.
 */
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	int ret;

	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);

		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

		rtnl_unlock();
		return ret;
	}

	/* now, re-initialize the nic client and ae device*/
	ret = hclgevf_reset_stack(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

	/* bring up the nic to enable TX/RX again */
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

	rtnl_unlock();

	return ret;
}

/* Request the PF to reset this VF via the mailbox. */
static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
	int status;
	u8 respmsg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
				      0, false, &respmsg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF reset request to PF failed(=%d)\n", status);

	return status;
}

/* ae_ops reset entry point: record a VF-level reset request and hand it to
 * the reset service task (asynchronous; no reset happens inline here).
 */
static void hclgevf_reset_event(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	handle->reset_level = HNAE3_VF_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	handle->last_reset_time = jiffies;
}

/* Return the cached firmware version. */
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

/* Reserve MSI vector 0 as the misc (mailbox/event) vector and account for
 * it in the driver's vector bookkeeping.
 */
static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

/* Queue the reset service task unless it is already scheduled or running. */
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

/* Queue the mailbox service task unless it is already scheduled or running. */
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

/* Queue the periodic service task unless the device is down or the task is
 * already scheduled.
 */
static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

/* From the periodic service task: re-schedule any work that was deferred
 * (pending mailbox events, pending reset).
 */
static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

/* 5-second periodic timer: re-arm itself and kick the service task. */
static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

/* Reset worker: handles a PENDING reset (hardware reset already asserted by
 * the PF - wait for completion and rebuild the stack) or a REQUESTED reset
 * (ask the PF to reset us; after 3 failed attempts escalate to a full
 * stack + PCIe reset). Guarded by the RST_HANDLING state bit.
 */
static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		ret = hclgevf_reset(hdev);
		if (ret)
			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect above cases than to
		 * react to the response of PF for this reset request. PF will
		 * ack the 1b and 2. cases but we will not get any intimation
		 * about 1a from PF as cmdq would be in unreliable state i.e.
		 * mailbox communication between PF and VF would be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			/* request PF for resetting this VF via mailbox */
			ret = hclgevf_do_reset(hdev);
			if (ret)
				dev_warn(&hdev->pdev->dev,
					 "VF rst fail, stack will call\n");
		}
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

/* Mailbox worker: drain asynchronous mailbox messages from the PF.
 * Guarded by the MBX_HANDLING state bit.
 */
static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

/* Periodic service worker: poll link state from the PF and re-schedule any
 * deferred mailbox/reset work.
 */
static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

/* Acknowledge vector-0 events by writing the handled bits back to the CMDQ
 * interrupt source register.
 */
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

/* Read the vector-0 event source register. Returns true (and the clear mask
 * via @clearval) when a mailbox/CMDQ-RX event is present; false for unknown
 * interrupt sources.
 */
static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

/* Mask (en=false) or unmask (en=true) the misc vector at its register. */
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

/* Misc (vector 0) interrupt handler: mask the vector, dispatch mailbox
 * events if present, ack them, then unmask.
 */
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	hclgevf_mbx_handler(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

/* Fetch queue and TC configuration from the PF. */
static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclge_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

/* Allocate the hclgevf device structure and link it with the ae device.
 * NOTE(review): the initializer of hdev from ae_dev->priv is dead - it is
 * overwritten by devm_kzalloc() before any use.
 */
static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev = ae_dev->priv;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

/* Populate the RoCE handle's vector/netdev/io-base info from the nic
 * handle, after checking enough MSI vectors remain for RoCE.
 */
static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

/* Initialize RSS: fill the indirection table round-robin over rss_size_max
 * and program the table and TC mode into hardware.
 */
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

/* Initial VLAN configuration: install the default (id 0) 802.1Q filter. */
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

/* ae_ops start: enable every TQP, clear stats, request link info, mark the
 * device up and start the 1s-delayed service timer.
 */
static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static
void hclgevf_ae_stop(struct hnae3_handle *handle) 1450e2cb1decSSalil Mehta { 1451e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1452e2cb1decSSalil Mehta int i, queue_id; 1453e2cb1decSSalil Mehta 14542f7e4896SFuyun Liang set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 14552f7e4896SFuyun Liang 1456e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; i++) { 1457e2cb1decSSalil Mehta /* Ring disable */ 1458e2cb1decSSalil Mehta queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); 1459e2cb1decSSalil Mehta if (queue_id < 0) { 1460e2cb1decSSalil Mehta dev_warn(&hdev->pdev->dev, 1461e2cb1decSSalil Mehta "Get invalid queue id, ignore it\n"); 1462e2cb1decSSalil Mehta continue; 1463e2cb1decSSalil Mehta } 1464e2cb1decSSalil Mehta 1465e2cb1decSSalil Mehta hclgevf_tqp_enable(hdev, queue_id, 0, false); 1466e2cb1decSSalil Mehta } 1467e2cb1decSSalil Mehta 1468e2cb1decSSalil Mehta /* reset tqp stats */ 1469e2cb1decSSalil Mehta hclgevf_reset_tqp_stats(handle); 14708cc6c1f7SFuyun Liang del_timer_sync(&hdev->service_timer); 14718cc6c1f7SFuyun Liang cancel_work_sync(&hdev->service_task); 1472f5be7967SYunsheng Lin clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 14738cc6c1f7SFuyun Liang hclgevf_update_link_status(hdev, 0); 1474e2cb1decSSalil Mehta } 1475e2cb1decSSalil Mehta 1476e2cb1decSSalil Mehta static void hclgevf_state_init(struct hclgevf_dev *hdev) 1477e2cb1decSSalil Mehta { 14787a01c897SSalil Mehta /* if this is on going reset then skip this initialization */ 14797a01c897SSalil Mehta if (hclgevf_dev_ongoing_reset(hdev)) 14807a01c897SSalil Mehta return; 14817a01c897SSalil Mehta 1482e2cb1decSSalil Mehta /* setup tasks for the MBX */ 1483e2cb1decSSalil Mehta INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); 1484e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1485e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1486e2cb1decSSalil Mehta 1487e2cb1decSSalil Mehta /* setup tasks for 
service timer */ 1488e2cb1decSSalil Mehta timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); 1489e2cb1decSSalil Mehta 1490e2cb1decSSalil Mehta INIT_WORK(&hdev->service_task, hclgevf_service_task); 1491e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1492e2cb1decSSalil Mehta 149335a1e503SSalil Mehta INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); 149435a1e503SSalil Mehta 1495e2cb1decSSalil Mehta mutex_init(&hdev->mbx_resp.mbx_mutex); 1496e2cb1decSSalil Mehta 1497e2cb1decSSalil Mehta /* bring the device down */ 1498e2cb1decSSalil Mehta set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1499e2cb1decSSalil Mehta } 1500e2cb1decSSalil Mehta 1501e2cb1decSSalil Mehta static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 1502e2cb1decSSalil Mehta { 1503e2cb1decSSalil Mehta set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1504e2cb1decSSalil Mehta 1505e2cb1decSSalil Mehta if (hdev->service_timer.function) 1506e2cb1decSSalil Mehta del_timer_sync(&hdev->service_timer); 1507e2cb1decSSalil Mehta if (hdev->service_task.func) 1508e2cb1decSSalil Mehta cancel_work_sync(&hdev->service_task); 1509e2cb1decSSalil Mehta if (hdev->mbx_service_task.func) 1510e2cb1decSSalil Mehta cancel_work_sync(&hdev->mbx_service_task); 151135a1e503SSalil Mehta if (hdev->rst_service_task.func) 151235a1e503SSalil Mehta cancel_work_sync(&hdev->rst_service_task); 1513e2cb1decSSalil Mehta 1514e2cb1decSSalil Mehta mutex_destroy(&hdev->mbx_resp.mbx_mutex); 1515e2cb1decSSalil Mehta } 1516e2cb1decSSalil Mehta 1517e2cb1decSSalil Mehta static int hclgevf_init_msi(struct hclgevf_dev *hdev) 1518e2cb1decSSalil Mehta { 1519e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 1520e2cb1decSSalil Mehta int vectors; 1521e2cb1decSSalil Mehta int i; 1522e2cb1decSSalil Mehta 15237a01c897SSalil Mehta /* if this is on going reset then skip this initialization */ 15247a01c897SSalil Mehta if (hclgevf_dev_ongoing_reset(hdev)) 15257a01c897SSalil Mehta return 0; 15267a01c897SSalil Mehta 
152707acf909SJian Shen if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) 152807acf909SJian Shen vectors = pci_alloc_irq_vectors(pdev, 152907acf909SJian Shen hdev->roce_base_msix_offset + 1, 153007acf909SJian Shen hdev->num_msi, 153107acf909SJian Shen PCI_IRQ_MSIX); 153207acf909SJian Shen else 1533e2cb1decSSalil Mehta vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 1534e2cb1decSSalil Mehta PCI_IRQ_MSI | PCI_IRQ_MSIX); 153507acf909SJian Shen 1536e2cb1decSSalil Mehta if (vectors < 0) { 1537e2cb1decSSalil Mehta dev_err(&pdev->dev, 1538e2cb1decSSalil Mehta "failed(%d) to allocate MSI/MSI-X vectors\n", 1539e2cb1decSSalil Mehta vectors); 1540e2cb1decSSalil Mehta return vectors; 1541e2cb1decSSalil Mehta } 1542e2cb1decSSalil Mehta if (vectors < hdev->num_msi) 1543e2cb1decSSalil Mehta dev_warn(&hdev->pdev->dev, 1544e2cb1decSSalil Mehta "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 1545e2cb1decSSalil Mehta hdev->num_msi, vectors); 1546e2cb1decSSalil Mehta 1547e2cb1decSSalil Mehta hdev->num_msi = vectors; 1548e2cb1decSSalil Mehta hdev->num_msi_left = vectors; 1549e2cb1decSSalil Mehta hdev->base_msi_vector = pdev->irq; 155007acf909SJian Shen hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 1551e2cb1decSSalil Mehta 1552e2cb1decSSalil Mehta hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 1553e2cb1decSSalil Mehta sizeof(u16), GFP_KERNEL); 1554e2cb1decSSalil Mehta if (!hdev->vector_status) { 1555e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 1556e2cb1decSSalil Mehta return -ENOMEM; 1557e2cb1decSSalil Mehta } 1558e2cb1decSSalil Mehta 1559e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++) 1560e2cb1decSSalil Mehta hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 1561e2cb1decSSalil Mehta 1562e2cb1decSSalil Mehta hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 1563e2cb1decSSalil Mehta sizeof(int), GFP_KERNEL); 1564e2cb1decSSalil Mehta if (!hdev->vector_irq) { 1565e2cb1decSSalil Mehta 
pci_free_irq_vectors(pdev); 1566e2cb1decSSalil Mehta return -ENOMEM; 1567e2cb1decSSalil Mehta } 1568e2cb1decSSalil Mehta 1569e2cb1decSSalil Mehta return 0; 1570e2cb1decSSalil Mehta } 1571e2cb1decSSalil Mehta 1572e2cb1decSSalil Mehta static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 1573e2cb1decSSalil Mehta { 1574e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 1575e2cb1decSSalil Mehta 1576e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 1577e2cb1decSSalil Mehta } 1578e2cb1decSSalil Mehta 1579e2cb1decSSalil Mehta static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 1580e2cb1decSSalil Mehta { 1581e2cb1decSSalil Mehta int ret = 0; 1582e2cb1decSSalil Mehta 15837a01c897SSalil Mehta /* if this is on going reset then skip this initialization */ 15847a01c897SSalil Mehta if (hclgevf_dev_ongoing_reset(hdev)) 15857a01c897SSalil Mehta return 0; 15867a01c897SSalil Mehta 1587e2cb1decSSalil Mehta hclgevf_get_misc_vector(hdev); 1588e2cb1decSSalil Mehta 1589e2cb1decSSalil Mehta ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 1590e2cb1decSSalil Mehta 0, "hclgevf_cmd", hdev); 1591e2cb1decSSalil Mehta if (ret) { 1592e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 1593e2cb1decSSalil Mehta hdev->misc_vector.vector_irq); 1594e2cb1decSSalil Mehta return ret; 1595e2cb1decSSalil Mehta } 1596e2cb1decSSalil Mehta 15971819e409SXi Wang hclgevf_clear_event_cause(hdev, 0); 15981819e409SXi Wang 1599e2cb1decSSalil Mehta /* enable misc. 
vector(vector 0) */ 1600e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, true); 1601e2cb1decSSalil Mehta 1602e2cb1decSSalil Mehta return ret; 1603e2cb1decSSalil Mehta } 1604e2cb1decSSalil Mehta 1605e2cb1decSSalil Mehta static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 1606e2cb1decSSalil Mehta { 1607e2cb1decSSalil Mehta /* disable misc vector(vector 0) */ 1608e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, false); 16091819e409SXi Wang synchronize_irq(hdev->misc_vector.vector_irq); 1610e2cb1decSSalil Mehta free_irq(hdev->misc_vector.vector_irq, hdev); 1611e2cb1decSSalil Mehta hclgevf_free_vector(hdev, 0); 1612e2cb1decSSalil Mehta } 1613e2cb1decSSalil Mehta 1614e718a93fSPeng Li static int hclgevf_init_client_instance(struct hnae3_client *client, 1615e718a93fSPeng Li struct hnae3_ae_dev *ae_dev) 1616e2cb1decSSalil Mehta { 1617e718a93fSPeng Li struct hclgevf_dev *hdev = ae_dev->priv; 1618e2cb1decSSalil Mehta int ret; 1619e2cb1decSSalil Mehta 1620e2cb1decSSalil Mehta switch (client->type) { 1621e2cb1decSSalil Mehta case HNAE3_CLIENT_KNIC: 1622e2cb1decSSalil Mehta hdev->nic_client = client; 1623e2cb1decSSalil Mehta hdev->nic.client = client; 1624e2cb1decSSalil Mehta 1625e2cb1decSSalil Mehta ret = client->ops->init_instance(&hdev->nic); 1626e2cb1decSSalil Mehta if (ret) 162749dd8054SJian Shen goto clear_nic; 1628e2cb1decSSalil Mehta 1629d9f28fc2SJian Shen hnae3_set_client_init_flag(client, ae_dev, 1); 1630d9f28fc2SJian Shen 1631e2cb1decSSalil Mehta if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { 1632e2cb1decSSalil Mehta struct hnae3_client *rc = hdev->roce_client; 1633e2cb1decSSalil Mehta 1634e2cb1decSSalil Mehta ret = hclgevf_init_roce_base_info(hdev); 1635e2cb1decSSalil Mehta if (ret) 163649dd8054SJian Shen goto clear_roce; 1637e2cb1decSSalil Mehta ret = rc->ops->init_instance(&hdev->roce); 1638e2cb1decSSalil Mehta if (ret) 163949dd8054SJian Shen goto clear_roce; 1640d9f28fc2SJian Shen 1641d9f28fc2SJian Shen 
hnae3_set_client_init_flag(hdev->roce_client, ae_dev, 1642d9f28fc2SJian Shen 1); 1643e2cb1decSSalil Mehta } 1644e2cb1decSSalil Mehta break; 1645e2cb1decSSalil Mehta case HNAE3_CLIENT_UNIC: 1646e2cb1decSSalil Mehta hdev->nic_client = client; 1647e2cb1decSSalil Mehta hdev->nic.client = client; 1648e2cb1decSSalil Mehta 1649e2cb1decSSalil Mehta ret = client->ops->init_instance(&hdev->nic); 1650e2cb1decSSalil Mehta if (ret) 165149dd8054SJian Shen goto clear_nic; 1652d9f28fc2SJian Shen 1653d9f28fc2SJian Shen hnae3_set_client_init_flag(client, ae_dev, 1); 1654e2cb1decSSalil Mehta break; 1655e2cb1decSSalil Mehta case HNAE3_CLIENT_ROCE: 1656544a7bcdSLijun Ou if (hnae3_dev_roce_supported(hdev)) { 1657e2cb1decSSalil Mehta hdev->roce_client = client; 1658e2cb1decSSalil Mehta hdev->roce.client = client; 1659544a7bcdSLijun Ou } 1660e2cb1decSSalil Mehta 1661544a7bcdSLijun Ou if (hdev->roce_client && hdev->nic_client) { 1662e2cb1decSSalil Mehta ret = hclgevf_init_roce_base_info(hdev); 1663e2cb1decSSalil Mehta if (ret) 166449dd8054SJian Shen goto clear_roce; 1665e2cb1decSSalil Mehta 1666e2cb1decSSalil Mehta ret = client->ops->init_instance(&hdev->roce); 1667e2cb1decSSalil Mehta if (ret) 166849dd8054SJian Shen goto clear_roce; 1669e2cb1decSSalil Mehta } 1670d9f28fc2SJian Shen 1671d9f28fc2SJian Shen hnae3_set_client_init_flag(client, ae_dev, 1); 1672e2cb1decSSalil Mehta } 1673e2cb1decSSalil Mehta 1674e2cb1decSSalil Mehta return 0; 167549dd8054SJian Shen 167649dd8054SJian Shen clear_nic: 167749dd8054SJian Shen hdev->nic_client = NULL; 167849dd8054SJian Shen hdev->nic.client = NULL; 167949dd8054SJian Shen return ret; 168049dd8054SJian Shen clear_roce: 168149dd8054SJian Shen hdev->roce_client = NULL; 168249dd8054SJian Shen hdev->roce.client = NULL; 168349dd8054SJian Shen return ret; 1684e2cb1decSSalil Mehta } 1685e2cb1decSSalil Mehta 1686e718a93fSPeng Li static void hclgevf_uninit_client_instance(struct hnae3_client *client, 1687e718a93fSPeng Li struct hnae3_ae_dev *ae_dev) 
1688e2cb1decSSalil Mehta { 1689e718a93fSPeng Li struct hclgevf_dev *hdev = ae_dev->priv; 1690e718a93fSPeng Li 1691e2cb1decSSalil Mehta /* un-init roce, if it exists */ 169249dd8054SJian Shen if (hdev->roce_client) { 1693e2cb1decSSalil Mehta hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 169449dd8054SJian Shen hdev->roce_client = NULL; 169549dd8054SJian Shen hdev->roce.client = NULL; 169649dd8054SJian Shen } 1697e2cb1decSSalil Mehta 1698e2cb1decSSalil Mehta /* un-init nic/unic, if this was not called by roce client */ 169949dd8054SJian Shen if (client->ops->uninit_instance && hdev->nic_client && 170049dd8054SJian Shen client->type != HNAE3_CLIENT_ROCE) { 1701e2cb1decSSalil Mehta client->ops->uninit_instance(&hdev->nic, 0); 170249dd8054SJian Shen hdev->nic_client = NULL; 170349dd8054SJian Shen hdev->nic.client = NULL; 170449dd8054SJian Shen } 1705e2cb1decSSalil Mehta } 1706e2cb1decSSalil Mehta 1707e2cb1decSSalil Mehta static int hclgevf_pci_init(struct hclgevf_dev *hdev) 1708e2cb1decSSalil Mehta { 1709e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 1710e2cb1decSSalil Mehta struct hclgevf_hw *hw; 1711e2cb1decSSalil Mehta int ret; 1712e2cb1decSSalil Mehta 17137a01c897SSalil Mehta /* check if we need to skip initialization of pci. This will happen if 17147a01c897SSalil Mehta * device is undergoing VF reset. Otherwise, we would need to 17157a01c897SSalil Mehta * re-initialize pci interface again i.e. when device is not going 17167a01c897SSalil Mehta * through *any* reset or actually undergoing full reset. 
17177a01c897SSalil Mehta */ 17187a01c897SSalil Mehta if (hclgevf_dev_ongoing_reset(hdev)) 17197a01c897SSalil Mehta return 0; 17207a01c897SSalil Mehta 1721e2cb1decSSalil Mehta ret = pci_enable_device(pdev); 1722e2cb1decSSalil Mehta if (ret) { 1723e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed to enable PCI device\n"); 17243e249d3bSFuyun Liang return ret; 1725e2cb1decSSalil Mehta } 1726e2cb1decSSalil Mehta 1727e2cb1decSSalil Mehta ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 1728e2cb1decSSalil Mehta if (ret) { 1729e2cb1decSSalil Mehta dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 1730e2cb1decSSalil Mehta goto err_disable_device; 1731e2cb1decSSalil Mehta } 1732e2cb1decSSalil Mehta 1733e2cb1decSSalil Mehta ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 1734e2cb1decSSalil Mehta if (ret) { 1735e2cb1decSSalil Mehta dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 1736e2cb1decSSalil Mehta goto err_disable_device; 1737e2cb1decSSalil Mehta } 1738e2cb1decSSalil Mehta 1739e2cb1decSSalil Mehta pci_set_master(pdev); 1740e2cb1decSSalil Mehta hw = &hdev->hw; 1741e2cb1decSSalil Mehta hw->hdev = hdev; 17422e1ea493SPeng Li hw->io_base = pci_iomap(pdev, 2, 0); 1743e2cb1decSSalil Mehta if (!hw->io_base) { 1744e2cb1decSSalil Mehta dev_err(&pdev->dev, "can't map configuration register space\n"); 1745e2cb1decSSalil Mehta ret = -ENOMEM; 1746e2cb1decSSalil Mehta goto err_clr_master; 1747e2cb1decSSalil Mehta } 1748e2cb1decSSalil Mehta 1749e2cb1decSSalil Mehta return 0; 1750e2cb1decSSalil Mehta 1751e2cb1decSSalil Mehta err_clr_master: 1752e2cb1decSSalil Mehta pci_clear_master(pdev); 1753e2cb1decSSalil Mehta pci_release_regions(pdev); 1754e2cb1decSSalil Mehta err_disable_device: 1755e2cb1decSSalil Mehta pci_disable_device(pdev); 17563e249d3bSFuyun Liang 1757e2cb1decSSalil Mehta return ret; 1758e2cb1decSSalil Mehta } 1759e2cb1decSSalil Mehta 1760e2cb1decSSalil Mehta static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 1761e2cb1decSSalil 
Mehta { 1762e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 1763e2cb1decSSalil Mehta 1764e2cb1decSSalil Mehta pci_iounmap(pdev, hdev->hw.io_base); 1765e2cb1decSSalil Mehta pci_clear_master(pdev); 1766e2cb1decSSalil Mehta pci_release_regions(pdev); 1767e2cb1decSSalil Mehta pci_disable_device(pdev); 1768e2cb1decSSalil Mehta } 1769e2cb1decSSalil Mehta 177007acf909SJian Shen static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 177107acf909SJian Shen { 177207acf909SJian Shen struct hclgevf_query_res_cmd *req; 177307acf909SJian Shen struct hclgevf_desc desc; 177407acf909SJian Shen int ret; 177507acf909SJian Shen 177607acf909SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 177707acf909SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 177807acf909SJian Shen if (ret) { 177907acf909SJian Shen dev_err(&hdev->pdev->dev, 178007acf909SJian Shen "query vf resource failed, ret = %d.\n", ret); 178107acf909SJian Shen return ret; 178207acf909SJian Shen } 178307acf909SJian Shen 178407acf909SJian Shen req = (struct hclgevf_query_res_cmd *)desc.data; 178507acf909SJian Shen 178607acf909SJian Shen if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { 178707acf909SJian Shen hdev->roce_base_msix_offset = 178807acf909SJian Shen hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), 178907acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_M, 179007acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_S); 179107acf909SJian Shen hdev->num_roce_msix = 179207acf909SJian Shen hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 179307acf909SJian Shen HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 179407acf909SJian Shen 179507acf909SJian Shen /* VF should have NIC vectors and Roce vectors, NIC vectors 179607acf909SJian Shen * are queued before Roce vectors. The offset is fixed to 64. 
179707acf909SJian Shen */ 179807acf909SJian Shen hdev->num_msi = hdev->num_roce_msix + 179907acf909SJian Shen hdev->roce_base_msix_offset; 180007acf909SJian Shen } else { 180107acf909SJian Shen hdev->num_msi = 180207acf909SJian Shen hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 180307acf909SJian Shen HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 180407acf909SJian Shen } 180507acf909SJian Shen 180607acf909SJian Shen return 0; 180707acf909SJian Shen } 180807acf909SJian Shen 18097a01c897SSalil Mehta static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 1810e2cb1decSSalil Mehta { 18117a01c897SSalil Mehta struct pci_dev *pdev = hdev->pdev; 1812e2cb1decSSalil Mehta int ret; 1813e2cb1decSSalil Mehta 18147a01c897SSalil Mehta /* check if device is on-going full reset(i.e. pcie as well) */ 18157a01c897SSalil Mehta if (hclgevf_dev_ongoing_full_reset(hdev)) { 18167a01c897SSalil Mehta dev_warn(&pdev->dev, "device is going full reset\n"); 18177a01c897SSalil Mehta hclgevf_uninit_hdev(hdev); 18187a01c897SSalil Mehta } 1819e2cb1decSSalil Mehta 1820e2cb1decSSalil Mehta ret = hclgevf_pci_init(hdev); 1821e2cb1decSSalil Mehta if (ret) { 1822e2cb1decSSalil Mehta dev_err(&pdev->dev, "PCI initialization failed\n"); 1823e2cb1decSSalil Mehta return ret; 1824e2cb1decSSalil Mehta } 1825e2cb1decSSalil Mehta 1826eddf0462SYunsheng Lin ret = hclgevf_cmd_init(hdev); 1827eddf0462SYunsheng Lin if (ret) 1828eddf0462SYunsheng Lin goto err_cmd_init; 1829eddf0462SYunsheng Lin 183007acf909SJian Shen /* Get vf resource */ 183107acf909SJian Shen ret = hclgevf_query_vf_resource(hdev); 183207acf909SJian Shen if (ret) { 183307acf909SJian Shen dev_err(&hdev->pdev->dev, 183407acf909SJian Shen "Query vf status error, ret = %d.\n", ret); 183507acf909SJian Shen goto err_query_vf; 183607acf909SJian Shen } 183707acf909SJian Shen 183807acf909SJian Shen ret = hclgevf_init_msi(hdev); 183907acf909SJian Shen if (ret) { 184007acf909SJian Shen dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 
184107acf909SJian Shen goto err_query_vf; 184207acf909SJian Shen } 184307acf909SJian Shen 184407acf909SJian Shen hclgevf_state_init(hdev); 184507acf909SJian Shen 1846e2cb1decSSalil Mehta ret = hclgevf_misc_irq_init(hdev); 1847e2cb1decSSalil Mehta if (ret) { 1848e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 1849e2cb1decSSalil Mehta ret); 1850e2cb1decSSalil Mehta goto err_misc_irq_init; 1851e2cb1decSSalil Mehta } 1852e2cb1decSSalil Mehta 1853e2cb1decSSalil Mehta ret = hclgevf_configure(hdev); 1854e2cb1decSSalil Mehta if (ret) { 1855e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 1856e2cb1decSSalil Mehta goto err_config; 1857e2cb1decSSalil Mehta } 1858e2cb1decSSalil Mehta 1859e2cb1decSSalil Mehta ret = hclgevf_alloc_tqps(hdev); 1860e2cb1decSSalil Mehta if (ret) { 1861e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 1862e2cb1decSSalil Mehta goto err_config; 1863e2cb1decSSalil Mehta } 1864e2cb1decSSalil Mehta 1865e2cb1decSSalil Mehta ret = hclgevf_set_handle_info(hdev); 1866e2cb1decSSalil Mehta if (ret) { 1867e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); 1868e2cb1decSSalil Mehta goto err_config; 1869e2cb1decSSalil Mehta } 1870e2cb1decSSalil Mehta 18713a678b58SXi Wang /* Initialize mta type for this VF */ 18723a678b58SXi Wang ret = hclgevf_cfg_func_mta_type(hdev); 1873e2cb1decSSalil Mehta if (ret) { 1874e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 18753a678b58SXi Wang "failed(%d) to initialize MTA type\n", ret); 1876e2cb1decSSalil Mehta goto err_config; 1877e2cb1decSSalil Mehta } 1878e2cb1decSSalil Mehta 1879e2cb1decSSalil Mehta /* Initialize RSS for this VF */ 1880e2cb1decSSalil Mehta ret = hclgevf_rss_init_hw(hdev); 1881e2cb1decSSalil Mehta if (ret) { 1882e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1883e2cb1decSSalil Mehta "failed(%d) to initialize RSS\n", ret); 1884e2cb1decSSalil Mehta goto err_config; 1885e2cb1decSSalil 
Mehta } 1886e2cb1decSSalil Mehta 1887e2cb1decSSalil Mehta ret = hclgevf_init_vlan_config(hdev); 1888e2cb1decSSalil Mehta if (ret) { 1889e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 1890e2cb1decSSalil Mehta "failed(%d) to initialize VLAN config\n", ret); 1891e2cb1decSSalil Mehta goto err_config; 1892e2cb1decSSalil Mehta } 1893e2cb1decSSalil Mehta 1894e2cb1decSSalil Mehta pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); 1895e2cb1decSSalil Mehta 1896e2cb1decSSalil Mehta return 0; 1897e2cb1decSSalil Mehta 1898e2cb1decSSalil Mehta err_config: 1899e2cb1decSSalil Mehta hclgevf_misc_irq_uninit(hdev); 1900e2cb1decSSalil Mehta err_misc_irq_init: 1901e2cb1decSSalil Mehta hclgevf_state_uninit(hdev); 1902e2cb1decSSalil Mehta hclgevf_uninit_msi(hdev); 190307acf909SJian Shen err_query_vf: 190407acf909SJian Shen hclgevf_cmd_uninit(hdev); 190507acf909SJian Shen err_cmd_init: 1906e2cb1decSSalil Mehta hclgevf_pci_uninit(hdev); 1907e2cb1decSSalil Mehta return ret; 1908e2cb1decSSalil Mehta } 1909e2cb1decSSalil Mehta 19107a01c897SSalil Mehta static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 1911e2cb1decSSalil Mehta { 1912e2cb1decSSalil Mehta hclgevf_state_uninit(hdev); 1913eddf0462SYunsheng Lin hclgevf_misc_irq_uninit(hdev); 1914eddf0462SYunsheng Lin hclgevf_cmd_uninit(hdev); 1915e2cb1decSSalil Mehta hclgevf_uninit_msi(hdev); 1916e2cb1decSSalil Mehta hclgevf_pci_uninit(hdev); 19177a01c897SSalil Mehta } 19187a01c897SSalil Mehta 19197a01c897SSalil Mehta static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 19207a01c897SSalil Mehta { 19217a01c897SSalil Mehta struct pci_dev *pdev = ae_dev->pdev; 19227a01c897SSalil Mehta int ret; 19237a01c897SSalil Mehta 19247a01c897SSalil Mehta ret = hclgevf_alloc_hdev(ae_dev); 19257a01c897SSalil Mehta if (ret) { 19267a01c897SSalil Mehta dev_err(&pdev->dev, "hclge device allocation failed\n"); 19277a01c897SSalil Mehta return ret; 19287a01c897SSalil Mehta } 19297a01c897SSalil Mehta 19307a01c897SSalil Mehta ret = 
hclgevf_init_hdev(ae_dev->priv); 19317a01c897SSalil Mehta if (ret) 19327a01c897SSalil Mehta dev_err(&pdev->dev, "hclge device initialization failed\n"); 19337a01c897SSalil Mehta 19347a01c897SSalil Mehta return ret; 19357a01c897SSalil Mehta } 19367a01c897SSalil Mehta 19377a01c897SSalil Mehta static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 19387a01c897SSalil Mehta { 19397a01c897SSalil Mehta struct hclgevf_dev *hdev = ae_dev->priv; 19407a01c897SSalil Mehta 19417a01c897SSalil Mehta hclgevf_uninit_hdev(hdev); 1942e2cb1decSSalil Mehta ae_dev->priv = NULL; 1943e2cb1decSSalil Mehta } 1944e2cb1decSSalil Mehta 1945849e4607SPeng Li static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 1946849e4607SPeng Li { 1947849e4607SPeng Li struct hnae3_handle *nic = &hdev->nic; 1948849e4607SPeng Li struct hnae3_knic_private_info *kinfo = &nic->kinfo; 1949849e4607SPeng Li 1950849e4607SPeng Li return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 1951849e4607SPeng Li } 1952849e4607SPeng Li 1953849e4607SPeng Li /** 1954849e4607SPeng Li * hclgevf_get_channels - Get the current channels enabled and max supported. 1955849e4607SPeng Li * @handle: hardware information for network interface 1956849e4607SPeng Li * @ch: ethtool channels structure 1957849e4607SPeng Li * 1958849e4607SPeng Li * We don't support separate tx and rx queues as channels. The other count 1959849e4607SPeng Li * represents how many queues are being used for control. max_combined counts 1960849e4607SPeng Li * how many queue pairs we can support. They may not be mapped 1 to 1 with 1961849e4607SPeng Li * q_vectors since we support a lot more queue pairs than q_vectors. 
1962849e4607SPeng Li **/ 1963849e4607SPeng Li static void hclgevf_get_channels(struct hnae3_handle *handle, 1964849e4607SPeng Li struct ethtool_channels *ch) 1965849e4607SPeng Li { 1966849e4607SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1967849e4607SPeng Li 1968849e4607SPeng Li ch->max_combined = hclgevf_get_max_channels(hdev); 1969849e4607SPeng Li ch->other_count = 0; 1970849e4607SPeng Li ch->max_other = 0; 1971849e4607SPeng Li ch->combined_count = hdev->num_tqps; 1972849e4607SPeng Li } 1973849e4607SPeng Li 1974cc719218SPeng Li static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 1975cc719218SPeng Li u16 *free_tqps, u16 *max_rss_size) 1976cc719218SPeng Li { 1977cc719218SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1978cc719218SPeng Li 1979cc719218SPeng Li *free_tqps = 0; 1980cc719218SPeng Li *max_rss_size = hdev->rss_size_max; 1981cc719218SPeng Li } 1982cc719218SPeng Li 1983175ec96bSFuyun Liang static int hclgevf_get_status(struct hnae3_handle *handle) 1984175ec96bSFuyun Liang { 1985175ec96bSFuyun Liang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1986175ec96bSFuyun Liang 1987175ec96bSFuyun Liang return hdev->hw.mac.link; 1988175ec96bSFuyun Liang } 1989175ec96bSFuyun Liang 19904a152de9SFuyun Liang static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 19914a152de9SFuyun Liang u8 *auto_neg, u32 *speed, 19924a152de9SFuyun Liang u8 *duplex) 19934a152de9SFuyun Liang { 19944a152de9SFuyun Liang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 19954a152de9SFuyun Liang 19964a152de9SFuyun Liang if (speed) 19974a152de9SFuyun Liang *speed = hdev->hw.mac.speed; 19984a152de9SFuyun Liang if (duplex) 19994a152de9SFuyun Liang *duplex = hdev->hw.mac.duplex; 20004a152de9SFuyun Liang if (auto_neg) 20014a152de9SFuyun Liang *auto_neg = AUTONEG_DISABLE; 20024a152de9SFuyun Liang } 20034a152de9SFuyun Liang 20044a152de9SFuyun Liang void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 
speed, 20054a152de9SFuyun Liang u8 duplex) 20064a152de9SFuyun Liang { 20074a152de9SFuyun Liang hdev->hw.mac.speed = speed; 20084a152de9SFuyun Liang hdev->hw.mac.duplex = duplex; 20094a152de9SFuyun Liang } 20104a152de9SFuyun Liang 2011e2cb1decSSalil Mehta static const struct hnae3_ae_ops hclgevf_ops = { 2012e2cb1decSSalil Mehta .init_ae_dev = hclgevf_init_ae_dev, 2013e2cb1decSSalil Mehta .uninit_ae_dev = hclgevf_uninit_ae_dev, 2014e718a93fSPeng Li .init_client_instance = hclgevf_init_client_instance, 2015e718a93fSPeng Li .uninit_client_instance = hclgevf_uninit_client_instance, 2016e2cb1decSSalil Mehta .start = hclgevf_ae_start, 2017e2cb1decSSalil Mehta .stop = hclgevf_ae_stop, 2018e2cb1decSSalil Mehta .map_ring_to_vector = hclgevf_map_ring_to_vector, 2019e2cb1decSSalil Mehta .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 2020e2cb1decSSalil Mehta .get_vector = hclgevf_get_vector, 20210d3e6631SYunsheng Lin .put_vector = hclgevf_put_vector, 2022e2cb1decSSalil Mehta .reset_queue = hclgevf_reset_tqp, 2023e2cb1decSSalil Mehta .set_promisc_mode = hclgevf_set_promisc_mode, 2024e2cb1decSSalil Mehta .get_mac_addr = hclgevf_get_mac_addr, 2025e2cb1decSSalil Mehta .set_mac_addr = hclgevf_set_mac_addr, 2026e2cb1decSSalil Mehta .add_uc_addr = hclgevf_add_uc_addr, 2027e2cb1decSSalil Mehta .rm_uc_addr = hclgevf_rm_uc_addr, 2028e2cb1decSSalil Mehta .add_mc_addr = hclgevf_add_mc_addr, 2029e2cb1decSSalil Mehta .rm_mc_addr = hclgevf_rm_mc_addr, 20303a678b58SXi Wang .update_mta_status = hclgevf_update_mta_status, 2031e2cb1decSSalil Mehta .get_stats = hclgevf_get_stats, 2032e2cb1decSSalil Mehta .update_stats = hclgevf_update_stats, 2033e2cb1decSSalil Mehta .get_strings = hclgevf_get_strings, 2034e2cb1decSSalil Mehta .get_sset_count = hclgevf_get_sset_count, 2035e2cb1decSSalil Mehta .get_rss_key_size = hclgevf_get_rss_key_size, 2036e2cb1decSSalil Mehta .get_rss_indir_size = hclgevf_get_rss_indir_size, 2037e2cb1decSSalil Mehta .get_rss = hclgevf_get_rss, 2038e2cb1decSSalil Mehta 
.set_rss = hclgevf_set_rss, 2039e2cb1decSSalil Mehta .get_tc_size = hclgevf_get_tc_size, 2040e2cb1decSSalil Mehta .get_fw_version = hclgevf_get_fw_version, 2041e2cb1decSSalil Mehta .set_vlan_filter = hclgevf_set_vlan_filter, 2042b2641e2aSYunsheng Lin .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 20436d4c3981SSalil Mehta .reset_event = hclgevf_reset_event, 2044849e4607SPeng Li .get_channels = hclgevf_get_channels, 2045cc719218SPeng Li .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 2046175ec96bSFuyun Liang .get_status = hclgevf_get_status, 20474a152de9SFuyun Liang .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 2048e2cb1decSSalil Mehta }; 2049e2cb1decSSalil Mehta 2050e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf = { 2051e2cb1decSSalil Mehta .ops = &hclgevf_ops, 2052e2cb1decSSalil Mehta .pdev_id_table = ae_algovf_pci_tbl, 2053e2cb1decSSalil Mehta }; 2054e2cb1decSSalil Mehta 2055e2cb1decSSalil Mehta static int hclgevf_init(void) 2056e2cb1decSSalil Mehta { 2057e2cb1decSSalil Mehta pr_info("%s is initializing\n", HCLGEVF_NAME); 2058e2cb1decSSalil Mehta 2059854cf33aSFuyun Liang hnae3_register_ae_algo(&ae_algovf); 2060854cf33aSFuyun Liang 2061854cf33aSFuyun Liang return 0; 2062e2cb1decSSalil Mehta } 2063e2cb1decSSalil Mehta 2064e2cb1decSSalil Mehta static void hclgevf_exit(void) 2065e2cb1decSSalil Mehta { 2066e2cb1decSSalil Mehta hnae3_unregister_ae_algo(&ae_algovf); 2067e2cb1decSSalil Mehta } 2068e2cb1decSSalil Mehta module_init(hclgevf_init); 2069e2cb1decSSalil Mehta module_exit(hclgevf_exit); 2070e2cb1decSSalil Mehta 2071e2cb1decSSalil Mehta MODULE_LICENSE("GPL"); 2072e2cb1decSSalil Mehta MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 2073e2cb1decSSalil Mehta MODULE_DESCRIPTION("HCLGEVF Driver"); 2074e2cb1decSSalil Mehta MODULE_VERSION(HCLGEVF_MOD_VERSION); 2075