1e2cb1decSSalil Mehta // SPDX-License-Identifier: GPL-2.0+ 2e2cb1decSSalil Mehta // Copyright (c) 2016-2017 Hisilicon Limited. 3e2cb1decSSalil Mehta 4e2cb1decSSalil Mehta #include <linux/etherdevice.h> 56988eb2aSSalil Mehta #include <net/rtnetlink.h> 6e2cb1decSSalil Mehta #include "hclgevf_cmd.h" 7e2cb1decSSalil Mehta #include "hclgevf_main.h" 8e2cb1decSSalil Mehta #include "hclge_mbx.h" 9e2cb1decSSalil Mehta #include "hnae3.h" 10e2cb1decSSalil Mehta 11e2cb1decSSalil Mehta #define HCLGEVF_NAME "hclgevf" 12e2cb1decSSalil Mehta 137a01c897SSalil Mehta static int hclgevf_init_hdev(struct hclgevf_dev *hdev); 147a01c897SSalil Mehta static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev); 15e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf; 16e2cb1decSSalil Mehta 17e2cb1decSSalil Mehta static const struct pci_device_id ae_algovf_pci_tbl[] = { 18e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 19e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 20e2cb1decSSalil Mehta /* required last entry */ 21e2cb1decSSalil Mehta {0, } 22e2cb1decSSalil Mehta }; 23e2cb1decSSalil Mehta 242f550a46SYunsheng Lin MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 252f550a46SYunsheng Lin 26e2cb1decSSalil Mehta static inline struct hclgevf_dev *hclgevf_ae_get_hdev( 27e2cb1decSSalil Mehta struct hnae3_handle *handle) 28e2cb1decSSalil Mehta { 29e2cb1decSSalil Mehta return container_of(handle, struct hclgevf_dev, nic); 30e2cb1decSSalil Mehta } 31e2cb1decSSalil Mehta 32e2cb1decSSalil Mehta static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 33e2cb1decSSalil Mehta { 34b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 35e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 36e2cb1decSSalil Mehta struct hclgevf_desc desc; 37e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 38e2cb1decSSalil Mehta int status; 39e2cb1decSSalil Mehta int i; 40e2cb1decSSalil Mehta 41b4f1d303SJian Shen for (i = 
0; i < kinfo->num_tqps; i++) { 42b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 43e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, 44e2cb1decSSalil Mehta HCLGEVF_OPC_QUERY_RX_STATUS, 45e2cb1decSSalil Mehta true); 46e2cb1decSSalil Mehta 47e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 48e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 49e2cb1decSSalil Mehta if (status) { 50e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 51e2cb1decSSalil Mehta "Query tqp stat fail, status = %d,queue = %d\n", 52e2cb1decSSalil Mehta status, i); 53e2cb1decSSalil Mehta return status; 54e2cb1decSSalil Mehta } 55e2cb1decSSalil Mehta tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 56cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 57e2cb1decSSalil Mehta 58e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 59e2cb1decSSalil Mehta true); 60e2cb1decSSalil Mehta 61e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 62e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 63e2cb1decSSalil Mehta if (status) { 64e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 65e2cb1decSSalil Mehta "Query tqp stat fail, status = %d,queue = %d\n", 66e2cb1decSSalil Mehta status, i); 67e2cb1decSSalil Mehta return status; 68e2cb1decSSalil Mehta } 69e2cb1decSSalil Mehta tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 70cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 71e2cb1decSSalil Mehta } 72e2cb1decSSalil Mehta 73e2cb1decSSalil Mehta return 0; 74e2cb1decSSalil Mehta } 75e2cb1decSSalil Mehta 76e2cb1decSSalil Mehta static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 77e2cb1decSSalil Mehta { 78e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo = &handle->kinfo; 79e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 80e2cb1decSSalil Mehta u64 *buff = data; 81e2cb1decSSalil Mehta int i; 82e2cb1decSSalil Mehta 83b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 
84b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 85e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 86e2cb1decSSalil Mehta } 87e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 88b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 89e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 90e2cb1decSSalil Mehta } 91e2cb1decSSalil Mehta 92e2cb1decSSalil Mehta return buff; 93e2cb1decSSalil Mehta } 94e2cb1decSSalil Mehta 95e2cb1decSSalil Mehta static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 96e2cb1decSSalil Mehta { 97b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 98e2cb1decSSalil Mehta 99b4f1d303SJian Shen return kinfo->num_tqps * 2; 100e2cb1decSSalil Mehta } 101e2cb1decSSalil Mehta 102e2cb1decSSalil Mehta static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 103e2cb1decSSalil Mehta { 104b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 105e2cb1decSSalil Mehta u8 *buff = data; 106e2cb1decSSalil Mehta int i = 0; 107e2cb1decSSalil Mehta 108b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 109b4f1d303SJian Shen struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 110e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1110c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 112e2cb1decSSalil Mehta tqp->index); 113e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 114e2cb1decSSalil Mehta } 115e2cb1decSSalil Mehta 116b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 117b4f1d303SJian Shen struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 118e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1190c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 120e2cb1decSSalil Mehta tqp->index); 121e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 122e2cb1decSSalil Mehta } 123e2cb1decSSalil Mehta 124e2cb1decSSalil Mehta return buff; 125e2cb1decSSalil 
Mehta } 126e2cb1decSSalil Mehta 127e2cb1decSSalil Mehta static void hclgevf_update_stats(struct hnae3_handle *handle, 128e2cb1decSSalil Mehta struct net_device_stats *net_stats) 129e2cb1decSSalil Mehta { 130e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 131e2cb1decSSalil Mehta int status; 132e2cb1decSSalil Mehta 133e2cb1decSSalil Mehta status = hclgevf_tqps_update_stats(handle); 134e2cb1decSSalil Mehta if (status) 135e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 136e2cb1decSSalil Mehta "VF update of TQPS stats fail, status = %d.\n", 137e2cb1decSSalil Mehta status); 138e2cb1decSSalil Mehta } 139e2cb1decSSalil Mehta 140e2cb1decSSalil Mehta static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 141e2cb1decSSalil Mehta { 142e2cb1decSSalil Mehta if (strset == ETH_SS_TEST) 143e2cb1decSSalil Mehta return -EOPNOTSUPP; 144e2cb1decSSalil Mehta else if (strset == ETH_SS_STATS) 145e2cb1decSSalil Mehta return hclgevf_tqps_get_sset_count(handle, strset); 146e2cb1decSSalil Mehta 147e2cb1decSSalil Mehta return 0; 148e2cb1decSSalil Mehta } 149e2cb1decSSalil Mehta 150e2cb1decSSalil Mehta static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 151e2cb1decSSalil Mehta u8 *data) 152e2cb1decSSalil Mehta { 153e2cb1decSSalil Mehta u8 *p = (char *)data; 154e2cb1decSSalil Mehta 155e2cb1decSSalil Mehta if (strset == ETH_SS_STATS) 156e2cb1decSSalil Mehta p = hclgevf_tqps_get_strings(handle, p); 157e2cb1decSSalil Mehta } 158e2cb1decSSalil Mehta 159e2cb1decSSalil Mehta static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 160e2cb1decSSalil Mehta { 161e2cb1decSSalil Mehta hclgevf_tqps_get_stats(handle, data); 162e2cb1decSSalil Mehta } 163e2cb1decSSalil Mehta 164e2cb1decSSalil Mehta static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 165e2cb1decSSalil Mehta { 166e2cb1decSSalil Mehta u8 resp_msg; 167e2cb1decSSalil Mehta int status; 168e2cb1decSSalil Mehta 169e2cb1decSSalil Mehta status = 
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 170e2cb1decSSalil Mehta true, &resp_msg, sizeof(u8)); 171e2cb1decSSalil Mehta if (status) { 172e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 173e2cb1decSSalil Mehta "VF request to get TC info from PF failed %d", 174e2cb1decSSalil Mehta status); 175e2cb1decSSalil Mehta return status; 176e2cb1decSSalil Mehta } 177e2cb1decSSalil Mehta 178e2cb1decSSalil Mehta hdev->hw_tc_map = resp_msg; 179e2cb1decSSalil Mehta 180e2cb1decSSalil Mehta return 0; 181e2cb1decSSalil Mehta } 182e2cb1decSSalil Mehta 1836cee6fc3SJian Shen static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 184e2cb1decSSalil Mehta { 185e2cb1decSSalil Mehta #define HCLGEVF_TQPS_RSS_INFO_LEN 8 186e2cb1decSSalil Mehta u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 187e2cb1decSSalil Mehta int status; 188e2cb1decSSalil Mehta 189e2cb1decSSalil Mehta status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 190e2cb1decSSalil Mehta true, resp_msg, 191e2cb1decSSalil Mehta HCLGEVF_TQPS_RSS_INFO_LEN); 192e2cb1decSSalil Mehta if (status) { 193e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 194e2cb1decSSalil Mehta "VF request to get tqp info from PF failed %d", 195e2cb1decSSalil Mehta status); 196e2cb1decSSalil Mehta return status; 197e2cb1decSSalil Mehta } 198e2cb1decSSalil Mehta 199e2cb1decSSalil Mehta memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 200e2cb1decSSalil Mehta memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 201e2cb1decSSalil Mehta memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); 202e2cb1decSSalil Mehta memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); 203e2cb1decSSalil Mehta 204e2cb1decSSalil Mehta return 0; 205e2cb1decSSalil Mehta } 206e2cb1decSSalil Mehta 207e2cb1decSSalil Mehta static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 208e2cb1decSSalil Mehta { 209e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 210e2cb1decSSalil Mehta int i; 211e2cb1decSSalil Mehta 2127a01c897SSalil Mehta /* if this is on going reset 
then we need to re-allocate the TPQs 2137a01c897SSalil Mehta * since we cannot assume we would get same number of TPQs back from PF 2147a01c897SSalil Mehta */ 2157a01c897SSalil Mehta if (hclgevf_dev_ongoing_reset(hdev)) 2167a01c897SSalil Mehta devm_kfree(&hdev->pdev->dev, hdev->htqp); 2177a01c897SSalil Mehta 218e2cb1decSSalil Mehta hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 219e2cb1decSSalil Mehta sizeof(struct hclgevf_tqp), GFP_KERNEL); 220e2cb1decSSalil Mehta if (!hdev->htqp) 221e2cb1decSSalil Mehta return -ENOMEM; 222e2cb1decSSalil Mehta 223e2cb1decSSalil Mehta tqp = hdev->htqp; 224e2cb1decSSalil Mehta 225e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; i++) { 226e2cb1decSSalil Mehta tqp->dev = &hdev->pdev->dev; 227e2cb1decSSalil Mehta tqp->index = i; 228e2cb1decSSalil Mehta 229e2cb1decSSalil Mehta tqp->q.ae_algo = &ae_algovf; 230e2cb1decSSalil Mehta tqp->q.buf_size = hdev->rx_buf_len; 231e2cb1decSSalil Mehta tqp->q.desc_num = hdev->num_desc; 232e2cb1decSSalil Mehta tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 233e2cb1decSSalil Mehta i * HCLGEVF_TQP_REG_SIZE; 234e2cb1decSSalil Mehta 235e2cb1decSSalil Mehta tqp++; 236e2cb1decSSalil Mehta } 237e2cb1decSSalil Mehta 238e2cb1decSSalil Mehta return 0; 239e2cb1decSSalil Mehta } 240e2cb1decSSalil Mehta 241e2cb1decSSalil Mehta static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 242e2cb1decSSalil Mehta { 243e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 244e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo; 245e2cb1decSSalil Mehta u16 new_tqps = hdev->num_tqps; 246e2cb1decSSalil Mehta int i; 247e2cb1decSSalil Mehta 248e2cb1decSSalil Mehta kinfo = &nic->kinfo; 249e2cb1decSSalil Mehta kinfo->num_tc = 0; 250e2cb1decSSalil Mehta kinfo->num_desc = hdev->num_desc; 251e2cb1decSSalil Mehta kinfo->rx_buf_len = hdev->rx_buf_len; 252e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 253e2cb1decSSalil Mehta if (hdev->hw_tc_map & BIT(i)) 254e2cb1decSSalil Mehta 
kinfo->num_tc++; 255e2cb1decSSalil Mehta 256e2cb1decSSalil Mehta kinfo->rss_size 257e2cb1decSSalil Mehta = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 258e2cb1decSSalil Mehta new_tqps = kinfo->rss_size * kinfo->num_tc; 259e2cb1decSSalil Mehta kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 260e2cb1decSSalil Mehta 2617a01c897SSalil Mehta /* if this is on going reset then we need to re-allocate the hnae queues 2627a01c897SSalil Mehta * as well since number of TPQs from PF might have changed. 2637a01c897SSalil Mehta */ 2647a01c897SSalil Mehta if (hclgevf_dev_ongoing_reset(hdev)) 2657a01c897SSalil Mehta devm_kfree(&hdev->pdev->dev, kinfo->tqp); 2667a01c897SSalil Mehta 267e2cb1decSSalil Mehta kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 268e2cb1decSSalil Mehta sizeof(struct hnae3_queue *), GFP_KERNEL); 269e2cb1decSSalil Mehta if (!kinfo->tqp) 270e2cb1decSSalil Mehta return -ENOMEM; 271e2cb1decSSalil Mehta 272e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 273e2cb1decSSalil Mehta hdev->htqp[i].q.handle = &hdev->nic; 274e2cb1decSSalil Mehta hdev->htqp[i].q.tqp_index = i; 275e2cb1decSSalil Mehta kinfo->tqp[i] = &hdev->htqp[i].q; 276e2cb1decSSalil Mehta } 277e2cb1decSSalil Mehta 278e2cb1decSSalil Mehta return 0; 279e2cb1decSSalil Mehta } 280e2cb1decSSalil Mehta 281e2cb1decSSalil Mehta static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 282e2cb1decSSalil Mehta { 283e2cb1decSSalil Mehta int status; 284e2cb1decSSalil Mehta u8 resp_msg; 285e2cb1decSSalil Mehta 286e2cb1decSSalil Mehta status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 287e2cb1decSSalil Mehta 0, false, &resp_msg, sizeof(u8)); 288e2cb1decSSalil Mehta if (status) 289e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 290e2cb1decSSalil Mehta "VF failed to fetch link status(%d) from PF", status); 291e2cb1decSSalil Mehta } 292e2cb1decSSalil Mehta 293e2cb1decSSalil Mehta void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 
294e2cb1decSSalil Mehta { 295e2cb1decSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 296e2cb1decSSalil Mehta struct hnae3_client *client; 297e2cb1decSSalil Mehta 298e2cb1decSSalil Mehta client = handle->client; 299e2cb1decSSalil Mehta 300582d37bbSPeng Li link_state = 301582d37bbSPeng Li test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 302582d37bbSPeng Li 303e2cb1decSSalil Mehta if (link_state != hdev->hw.mac.link) { 304e2cb1decSSalil Mehta client->ops->link_status_change(handle, !!link_state); 305e2cb1decSSalil Mehta hdev->hw.mac.link = link_state; 306e2cb1decSSalil Mehta } 307e2cb1decSSalil Mehta } 308e2cb1decSSalil Mehta 309e2cb1decSSalil Mehta static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 310e2cb1decSSalil Mehta { 311e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 312e2cb1decSSalil Mehta int ret; 313e2cb1decSSalil Mehta 314e2cb1decSSalil Mehta nic->ae_algo = &ae_algovf; 315e2cb1decSSalil Mehta nic->pdev = hdev->pdev; 316e2cb1decSSalil Mehta nic->numa_node_mask = hdev->numa_node_mask; 317424eb834SSalil Mehta nic->flags |= HNAE3_SUPPORT_VF; 318e2cb1decSSalil Mehta 319e2cb1decSSalil Mehta if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { 320e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "unsupported device type %d\n", 321e2cb1decSSalil Mehta hdev->ae_dev->dev_type); 322e2cb1decSSalil Mehta return -EINVAL; 323e2cb1decSSalil Mehta } 324e2cb1decSSalil Mehta 325e2cb1decSSalil Mehta ret = hclgevf_knic_setup(hdev); 326e2cb1decSSalil Mehta if (ret) 327e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 328e2cb1decSSalil Mehta ret); 329e2cb1decSSalil Mehta return ret; 330e2cb1decSSalil Mehta } 331e2cb1decSSalil Mehta 332e2cb1decSSalil Mehta static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 333e2cb1decSSalil Mehta { 33436cbbdf6SPeng Li if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 33536cbbdf6SPeng Li dev_warn(&hdev->pdev->dev, 33636cbbdf6SPeng Li "vector(vector_id %d) 
has been freed.\n", vector_id); 33736cbbdf6SPeng Li return; 33836cbbdf6SPeng Li } 33936cbbdf6SPeng Li 340e2cb1decSSalil Mehta hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 341e2cb1decSSalil Mehta hdev->num_msi_left += 1; 342e2cb1decSSalil Mehta hdev->num_msi_used -= 1; 343e2cb1decSSalil Mehta } 344e2cb1decSSalil Mehta 345e2cb1decSSalil Mehta static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 346e2cb1decSSalil Mehta struct hnae3_vector_info *vector_info) 347e2cb1decSSalil Mehta { 348e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 349e2cb1decSSalil Mehta struct hnae3_vector_info *vector = vector_info; 350e2cb1decSSalil Mehta int alloc = 0; 351e2cb1decSSalil Mehta int i, j; 352e2cb1decSSalil Mehta 353e2cb1decSSalil Mehta vector_num = min(hdev->num_msi_left, vector_num); 354e2cb1decSSalil Mehta 355e2cb1decSSalil Mehta for (j = 0; j < vector_num; j++) { 356e2cb1decSSalil Mehta for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 357e2cb1decSSalil Mehta if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 358e2cb1decSSalil Mehta vector->vector = pci_irq_vector(hdev->pdev, i); 359e2cb1decSSalil Mehta vector->io_addr = hdev->hw.io_base + 360e2cb1decSSalil Mehta HCLGEVF_VECTOR_REG_BASE + 361e2cb1decSSalil Mehta (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 362e2cb1decSSalil Mehta hdev->vector_status[i] = 0; 363e2cb1decSSalil Mehta hdev->vector_irq[i] = vector->vector; 364e2cb1decSSalil Mehta 365e2cb1decSSalil Mehta vector++; 366e2cb1decSSalil Mehta alloc++; 367e2cb1decSSalil Mehta 368e2cb1decSSalil Mehta break; 369e2cb1decSSalil Mehta } 370e2cb1decSSalil Mehta } 371e2cb1decSSalil Mehta } 372e2cb1decSSalil Mehta hdev->num_msi_left -= alloc; 373e2cb1decSSalil Mehta hdev->num_msi_used += alloc; 374e2cb1decSSalil Mehta 375e2cb1decSSalil Mehta return alloc; 376e2cb1decSSalil Mehta } 377e2cb1decSSalil Mehta 378e2cb1decSSalil Mehta static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 
379e2cb1decSSalil Mehta { 380e2cb1decSSalil Mehta int i; 381e2cb1decSSalil Mehta 382e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++) 383e2cb1decSSalil Mehta if (vector == hdev->vector_irq[i]) 384e2cb1decSSalil Mehta return i; 385e2cb1decSSalil Mehta 386e2cb1decSSalil Mehta return -EINVAL; 387e2cb1decSSalil Mehta } 388e2cb1decSSalil Mehta 389374ad291SJian Shen static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 390374ad291SJian Shen const u8 hfunc, const u8 *key) 391374ad291SJian Shen { 392374ad291SJian Shen struct hclgevf_rss_config_cmd *req; 393374ad291SJian Shen struct hclgevf_desc desc; 394374ad291SJian Shen int key_offset; 395374ad291SJian Shen int key_size; 396374ad291SJian Shen int ret; 397374ad291SJian Shen 398374ad291SJian Shen req = (struct hclgevf_rss_config_cmd *)desc.data; 399374ad291SJian Shen 400374ad291SJian Shen for (key_offset = 0; key_offset < 3; key_offset++) { 401374ad291SJian Shen hclgevf_cmd_setup_basic_desc(&desc, 402374ad291SJian Shen HCLGEVF_OPC_RSS_GENERIC_CONFIG, 403374ad291SJian Shen false); 404374ad291SJian Shen 405374ad291SJian Shen req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 406374ad291SJian Shen req->hash_config |= 407374ad291SJian Shen (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 408374ad291SJian Shen 409374ad291SJian Shen if (key_offset == 2) 410374ad291SJian Shen key_size = 411374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; 412374ad291SJian Shen else 413374ad291SJian Shen key_size = HCLGEVF_RSS_HASH_KEY_NUM; 414374ad291SJian Shen 415374ad291SJian Shen memcpy(req->hash_key, 416374ad291SJian Shen key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 417374ad291SJian Shen 418374ad291SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 419374ad291SJian Shen if (ret) { 420374ad291SJian Shen dev_err(&hdev->pdev->dev, 421374ad291SJian Shen "Configure RSS config fail, status = %d\n", 422374ad291SJian Shen ret); 423374ad291SJian Shen return ret; 424374ad291SJian Shen } 
425374ad291SJian Shen } 426374ad291SJian Shen 427374ad291SJian Shen return 0; 428374ad291SJian Shen } 429374ad291SJian Shen 430e2cb1decSSalil Mehta static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 431e2cb1decSSalil Mehta { 432e2cb1decSSalil Mehta return HCLGEVF_RSS_KEY_SIZE; 433e2cb1decSSalil Mehta } 434e2cb1decSSalil Mehta 435e2cb1decSSalil Mehta static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 436e2cb1decSSalil Mehta { 437e2cb1decSSalil Mehta return HCLGEVF_RSS_IND_TBL_SIZE; 438e2cb1decSSalil Mehta } 439e2cb1decSSalil Mehta 440e2cb1decSSalil Mehta static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 441e2cb1decSSalil Mehta { 442e2cb1decSSalil Mehta const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 443e2cb1decSSalil Mehta struct hclgevf_rss_indirection_table_cmd *req; 444e2cb1decSSalil Mehta struct hclgevf_desc desc; 445e2cb1decSSalil Mehta int status; 446e2cb1decSSalil Mehta int i, j; 447e2cb1decSSalil Mehta 448e2cb1decSSalil Mehta req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 449e2cb1decSSalil Mehta 450e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 451e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 452e2cb1decSSalil Mehta false); 453e2cb1decSSalil Mehta req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 454e2cb1decSSalil Mehta req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 455e2cb1decSSalil Mehta for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 456e2cb1decSSalil Mehta req->rss_result[j] = 457e2cb1decSSalil Mehta indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 458e2cb1decSSalil Mehta 459e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 460e2cb1decSSalil Mehta if (status) { 461e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 462e2cb1decSSalil Mehta "VF failed(=%d) to set RSS indirection table\n", 463e2cb1decSSalil Mehta status); 464e2cb1decSSalil Mehta return status; 465e2cb1decSSalil Mehta } 466e2cb1decSSalil Mehta } 
467e2cb1decSSalil Mehta 468e2cb1decSSalil Mehta return 0; 469e2cb1decSSalil Mehta } 470e2cb1decSSalil Mehta 471e2cb1decSSalil Mehta static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 472e2cb1decSSalil Mehta { 473e2cb1decSSalil Mehta struct hclgevf_rss_tc_mode_cmd *req; 474e2cb1decSSalil Mehta u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 475e2cb1decSSalil Mehta u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 476e2cb1decSSalil Mehta u16 tc_size[HCLGEVF_MAX_TC_NUM]; 477e2cb1decSSalil Mehta struct hclgevf_desc desc; 478e2cb1decSSalil Mehta u16 roundup_size; 479e2cb1decSSalil Mehta int status; 480e2cb1decSSalil Mehta int i; 481e2cb1decSSalil Mehta 482e2cb1decSSalil Mehta req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 483e2cb1decSSalil Mehta 484e2cb1decSSalil Mehta roundup_size = roundup_pow_of_two(rss_size); 485e2cb1decSSalil Mehta roundup_size = ilog2(roundup_size); 486e2cb1decSSalil Mehta 487e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 488e2cb1decSSalil Mehta tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 489e2cb1decSSalil Mehta tc_size[i] = roundup_size; 490e2cb1decSSalil Mehta tc_offset[i] = rss_size * i; 491e2cb1decSSalil Mehta } 492e2cb1decSSalil Mehta 493e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 494e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 495e4e87715SPeng Li hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 496e2cb1decSSalil Mehta (tc_valid[i] & 0x1)); 497e4e87715SPeng Li hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 498e2cb1decSSalil Mehta HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 499e4e87715SPeng Li hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 500e2cb1decSSalil Mehta HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 501e2cb1decSSalil Mehta } 502e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 503e2cb1decSSalil Mehta if (status) 504e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 505e2cb1decSSalil Mehta "VF failed(=%d) to set rss 
tc mode\n", status); 506e2cb1decSSalil Mehta 507e2cb1decSSalil Mehta return status; 508e2cb1decSSalil Mehta } 509e2cb1decSSalil Mehta 510e2cb1decSSalil Mehta static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 511e2cb1decSSalil Mehta u8 *hfunc) 512e2cb1decSSalil Mehta { 513e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 514e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 515e2cb1decSSalil Mehta int i; 516e2cb1decSSalil Mehta 517374ad291SJian Shen if (handle->pdev->revision >= 0x21) { 518374ad291SJian Shen /* Get hash algorithm */ 519374ad291SJian Shen if (hfunc) { 520374ad291SJian Shen switch (rss_cfg->hash_algo) { 521374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 522374ad291SJian Shen *hfunc = ETH_RSS_HASH_TOP; 523374ad291SJian Shen break; 524374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 525374ad291SJian Shen *hfunc = ETH_RSS_HASH_XOR; 526374ad291SJian Shen break; 527374ad291SJian Shen default: 528374ad291SJian Shen *hfunc = ETH_RSS_HASH_UNKNOWN; 529374ad291SJian Shen break; 530374ad291SJian Shen } 531374ad291SJian Shen } 532374ad291SJian Shen 533374ad291SJian Shen /* Get the RSS Key required by the user */ 534374ad291SJian Shen if (key) 535374ad291SJian Shen memcpy(key, rss_cfg->rss_hash_key, 536374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 537374ad291SJian Shen } 538374ad291SJian Shen 539e2cb1decSSalil Mehta if (indir) 540e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 541e2cb1decSSalil Mehta indir[i] = rss_cfg->rss_indirection_tbl[i]; 542e2cb1decSSalil Mehta 543374ad291SJian Shen return 0; 544e2cb1decSSalil Mehta } 545e2cb1decSSalil Mehta 546e2cb1decSSalil Mehta static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 547e2cb1decSSalil Mehta const u8 *key, const u8 hfunc) 548e2cb1decSSalil Mehta { 549e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 550e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = 
&hdev->rss_cfg; 551374ad291SJian Shen int ret, i; 552374ad291SJian Shen 553374ad291SJian Shen if (handle->pdev->revision >= 0x21) { 554374ad291SJian Shen /* Set the RSS Hash Key if specififed by the user */ 555374ad291SJian Shen if (key) { 556374ad291SJian Shen switch (hfunc) { 557374ad291SJian Shen case ETH_RSS_HASH_TOP: 558374ad291SJian Shen rss_cfg->hash_algo = 559374ad291SJian Shen HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 560374ad291SJian Shen break; 561374ad291SJian Shen case ETH_RSS_HASH_XOR: 562374ad291SJian Shen rss_cfg->hash_algo = 563374ad291SJian Shen HCLGEVF_RSS_HASH_ALGO_SIMPLE; 564374ad291SJian Shen break; 565374ad291SJian Shen case ETH_RSS_HASH_NO_CHANGE: 566374ad291SJian Shen break; 567374ad291SJian Shen default: 568374ad291SJian Shen return -EINVAL; 569374ad291SJian Shen } 570374ad291SJian Shen 571374ad291SJian Shen ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 572374ad291SJian Shen key); 573374ad291SJian Shen if (ret) 574374ad291SJian Shen return ret; 575374ad291SJian Shen 576374ad291SJian Shen /* Update the shadow RSS key with user specified qids */ 577374ad291SJian Shen memcpy(rss_cfg->rss_hash_key, key, 578374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 579374ad291SJian Shen } 580374ad291SJian Shen } 581e2cb1decSSalil Mehta 582e2cb1decSSalil Mehta /* update the shadow RSS table with user specified qids */ 583e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 584e2cb1decSSalil Mehta rss_cfg->rss_indirection_tbl[i] = indir[i]; 585e2cb1decSSalil Mehta 586e2cb1decSSalil Mehta /* update the hardware */ 587e2cb1decSSalil Mehta return hclgevf_set_rss_indir_table(hdev); 588e2cb1decSSalil Mehta } 589e2cb1decSSalil Mehta 590d97b3072SJian Shen static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 591d97b3072SJian Shen { 592d97b3072SJian Shen u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGEVF_S_PORT_BIT : 0; 593d97b3072SJian Shen 594d97b3072SJian Shen if (nfc->data & RXH_L4_B_2_3) 595d97b3072SJian Shen hash_sets |= HCLGEVF_D_PORT_BIT; 596d97b3072SJian Shen else 597d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_PORT_BIT; 598d97b3072SJian Shen 599d97b3072SJian Shen if (nfc->data & RXH_IP_SRC) 600d97b3072SJian Shen hash_sets |= HCLGEVF_S_IP_BIT; 601d97b3072SJian Shen else 602d97b3072SJian Shen hash_sets &= ~HCLGEVF_S_IP_BIT; 603d97b3072SJian Shen 604d97b3072SJian Shen if (nfc->data & RXH_IP_DST) 605d97b3072SJian Shen hash_sets |= HCLGEVF_D_IP_BIT; 606d97b3072SJian Shen else 607d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_IP_BIT; 608d97b3072SJian Shen 609d97b3072SJian Shen if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 610d97b3072SJian Shen hash_sets |= HCLGEVF_V_TAG_BIT; 611d97b3072SJian Shen 612d97b3072SJian Shen return hash_sets; 613d97b3072SJian Shen } 614d97b3072SJian Shen 615d97b3072SJian Shen static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 616d97b3072SJian Shen struct ethtool_rxnfc *nfc) 617d97b3072SJian Shen { 618d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 619d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 620d97b3072SJian Shen struct hclgevf_rss_input_tuple_cmd *req; 621d97b3072SJian Shen struct hclgevf_desc desc; 622d97b3072SJian Shen u8 tuple_sets; 623d97b3072SJian Shen int ret; 624d97b3072SJian Shen 625d97b3072SJian Shen if (handle->pdev->revision == 0x20) 626d97b3072SJian Shen return -EOPNOTSUPP; 627d97b3072SJian Shen 628d97b3072SJian Shen if (nfc->data & 629d97b3072SJian Shen ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 630d97b3072SJian Shen return -EINVAL; 631d97b3072SJian Shen 632d97b3072SJian Shen req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 633d97b3072SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 634d97b3072SJian Shen 635d97b3072SJian Shen req->ipv4_tcp_en = 
/* Tail of hclgevf_set_rss_tuple(): the first statement below completes
 * "req->ipv4_tcp_en = ..." begun before this chunk. The function seeds
 * the command with the cached tuple settings, overrides only the field
 * selected by nfc->flow_type, sends the command, and commits the new
 * values to the software cache only on success.
 */
				rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		/* L4 port hashing is rejected for IPv6/SCTP here */
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	/* update the software cache only after hardware accepted the change */
	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

/* hclgevf_get_rss_tuple - report the cached RSS tuple selection for one
 * ethtool flow type as RXH_* bits in @nfc->data.
 * Returns -EOPNOTSUPP on revision 0x20 hardware (no per-tuple control),
 * -EINVAL for an unknown flow type, 0 otherwise.
 */
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		/* plain IP flows hash on src/dst IP only */
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	/* translate hardware tuple bits back into ethtool RXH_* flags */
	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

/* hclgevf_set_rss_input_tuple - push the entire cached RSS input-tuple
 * configuration in @rss_cfg to hardware in a single command.
 * Returns the firmware status (0 on success).
 */
static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

/* hclgevf_get_tc_size - return the RSS queue region size for this VF */
static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

/* hclgevf_bind_ring_to_vector - (un)map a chain of TQP rings to an
 * interrupt vector by mailbox request to the PF.
 * @en: true to map, false to unmap
 * @vector_id: hardware vector index (not the OS irq number)
 *
 * Ring nodes are packed into the mailbox message; whenever the message
 * is full, or the chain ends, the message is sent and a fresh one is
 * started. Returns 0 on success or the command/firmware status.
 */
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		/* per-node payload slot within the current message */
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
					HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			/* start a new mailbox message */
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		/* flush when the message is full or the chain is done */
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		    HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		    HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;	/* number of nodes carried */

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

/* hclgevf_map_ring_to_vector - resolve @vector to a hardware vector index
 * and map @ring_chain onto it.
 */
static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

/* Head of hclgevf_unmap_ring_from_vector() — continues past this chunk:
 * resolves the vector index and unbinds @ring_chain from it.
 */
static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail.
vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

/* hclgevf_put_vector - release a previously allocated interrupt vector
 * back to the driver's vector pool.
 */
static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

/* hclgevf_cmd_set_promisc_mode - ask the PF (by mailbox command) to set
 * unicast/multicast promiscuous mode for this VF.
 * Returns the command status (0 on success).
 */
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

/* hnae3 hook: forward the promiscuous-mode request to the PF */
static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				    bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

/* hclgevf_tqp_enable - enable/disable one TQP (stream @stream_id)
 * directly through the command queue. Returns the command status.
 */
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

/* hclgevf_get_queue_id - global TQP index backing an hnae3 queue */
static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

/* hclgevf_reset_tqp_stats - zero the software statistics of every TQP */
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

/* hclgevf_get_mac_addr - copy the currently configured MAC into @p */
static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

/* hclgevf_set_mac_addr - change the VF MAC address through the PF.
 * @is_first: true when no address was programmed yet, so the PF is asked
 * to ADD rather than MODIFY. The message carries new then old address;
 * the local copy is updated only if the PF accepted the request.
 */
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

/* hclgevf_add_uc_addr - add a unicast MAC filter entry via the PF */
static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

/* hclgevf_rm_uc_addr - remove a unicast MAC filter entry via the PF */
static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

/* hclgevf_add_mc_addr - add a multicast MAC filter entry via the PF */
static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

/* hclgevf_rm_mc_addr - remove a multicast MAC filter entry via the PF */
static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

/* hclgevf_set_vlan_filter - add or kill a VLAN filter entry via the PF.
 * Message layout: byte 0 = is_kill, bytes 1-2 = vlan_id (host order),
 * bytes 3-4 = protocol (as passed, network order). Only 802.1Q is
 * supported; vlan ids above 4095 are rejected.
 */
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

/* Head of hclgevf_en_hw_strip_rxvtag() — body continues past this chunk:
 * asks the PF to toggle hardware VLAN tag stripping on receive.
 */
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;
1077b2641e2aSYunsheng Lin msg_data = enable ? 1 : 0; 1078b2641e2aSYunsheng Lin return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1079b2641e2aSYunsheng Lin HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, 1080b2641e2aSYunsheng Lin 1, false, NULL, 0); 1081b2641e2aSYunsheng Lin } 1082b2641e2aSYunsheng Lin 10837fa6be4fSHuazhong Tan static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1084e2cb1decSSalil Mehta { 1085e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1086e2cb1decSSalil Mehta u8 msg_data[2]; 10871a426f8bSPeng Li int ret; 1088e2cb1decSSalil Mehta 1089e2cb1decSSalil Mehta memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 1090e2cb1decSSalil Mehta 10911a426f8bSPeng Li /* disable vf queue before send queue reset msg to PF */ 10921a426f8bSPeng Li ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 10931a426f8bSPeng Li if (ret) 10947fa6be4fSHuazhong Tan return ret; 10951a426f8bSPeng Li 10967fa6be4fSHuazhong Tan return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 10971a426f8bSPeng Li 2, true, NULL, 0); 1098e2cb1decSSalil Mehta } 1099e2cb1decSSalil Mehta 11006988eb2aSSalil Mehta static int hclgevf_notify_client(struct hclgevf_dev *hdev, 11016988eb2aSSalil Mehta enum hnae3_reset_notify_type type) 11026988eb2aSSalil Mehta { 11036988eb2aSSalil Mehta struct hnae3_client *client = hdev->nic_client; 11046988eb2aSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 11056988eb2aSSalil Mehta 11066988eb2aSSalil Mehta if (!client->ops->reset_notify) 11076988eb2aSSalil Mehta return -EOPNOTSUPP; 11086988eb2aSSalil Mehta 11096988eb2aSSalil Mehta return client->ops->reset_notify(handle, type); 11106988eb2aSSalil Mehta } 11116988eb2aSSalil Mehta 11126988eb2aSSalil Mehta static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 11136988eb2aSSalil Mehta { 11146988eb2aSSalil Mehta #define HCLGEVF_RESET_WAIT_MS 500 11156988eb2aSSalil Mehta #define HCLGEVF_RESET_WAIT_CNT 20 11166988eb2aSSalil Mehta u32 val, cnt = 0; 
11176988eb2aSSalil Mehta 11186988eb2aSSalil Mehta /* wait to check the hardware reset completion status */ 11196988eb2aSSalil Mehta val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); 1120e4e87715SPeng Li while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && 11216988eb2aSSalil Mehta (cnt < HCLGEVF_RESET_WAIT_CNT)) { 11226988eb2aSSalil Mehta msleep(HCLGEVF_RESET_WAIT_MS); 11236988eb2aSSalil Mehta val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); 11246988eb2aSSalil Mehta cnt++; 11256988eb2aSSalil Mehta } 11266988eb2aSSalil Mehta 11276988eb2aSSalil Mehta /* hardware completion status should be available by this time */ 11286988eb2aSSalil Mehta if (cnt >= HCLGEVF_RESET_WAIT_CNT) { 11296988eb2aSSalil Mehta dev_warn(&hdev->pdev->dev, 11306988eb2aSSalil Mehta "could'nt get reset done status from h/w, timeout!\n"); 11316988eb2aSSalil Mehta return -EBUSY; 11326988eb2aSSalil Mehta } 11336988eb2aSSalil Mehta 11346988eb2aSSalil Mehta /* we will wait a bit more to let reset of the stack to complete. This 11356988eb2aSSalil Mehta * might happen in case reset assertion was made by PF. Yes, this also 11366988eb2aSSalil Mehta * means we might end up waiting bit more even for VF reset. 
11376988eb2aSSalil Mehta */ 11386988eb2aSSalil Mehta msleep(5000); 11396988eb2aSSalil Mehta 11406988eb2aSSalil Mehta return 0; 11416988eb2aSSalil Mehta } 11426988eb2aSSalil Mehta 11436988eb2aSSalil Mehta static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 11446988eb2aSSalil Mehta { 11457a01c897SSalil Mehta int ret; 11467a01c897SSalil Mehta 11476988eb2aSSalil Mehta /* uninitialize the nic client */ 11486988eb2aSSalil Mehta hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 11496988eb2aSSalil Mehta 11507a01c897SSalil Mehta /* re-initialize the hclge device */ 11517a01c897SSalil Mehta ret = hclgevf_init_hdev(hdev); 11527a01c897SSalil Mehta if (ret) { 11537a01c897SSalil Mehta dev_err(&hdev->pdev->dev, 11547a01c897SSalil Mehta "hclge device re-init failed, VF is disabled!\n"); 11557a01c897SSalil Mehta return ret; 11567a01c897SSalil Mehta } 11576988eb2aSSalil Mehta 11586988eb2aSSalil Mehta /* bring up the nic client again */ 11596988eb2aSSalil Mehta hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 11606988eb2aSSalil Mehta 11616988eb2aSSalil Mehta return 0; 11626988eb2aSSalil Mehta } 11636988eb2aSSalil Mehta 11646988eb2aSSalil Mehta static int hclgevf_reset(struct hclgevf_dev *hdev) 11656988eb2aSSalil Mehta { 11666988eb2aSSalil Mehta int ret; 11676988eb2aSSalil Mehta 11686988eb2aSSalil Mehta rtnl_lock(); 11696988eb2aSSalil Mehta 11706988eb2aSSalil Mehta /* bring down the nic to stop any ongoing TX/RX */ 11716988eb2aSSalil Mehta hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 11726988eb2aSSalil Mehta 117329118ab9SHuazhong Tan rtnl_unlock(); 117429118ab9SHuazhong Tan 11756988eb2aSSalil Mehta /* check if VF could successfully fetch the hardware reset completion 11766988eb2aSSalil Mehta * status from the hardware 11776988eb2aSSalil Mehta */ 11786988eb2aSSalil Mehta ret = hclgevf_reset_wait(hdev); 11796988eb2aSSalil Mehta if (ret) { 11806988eb2aSSalil Mehta /* can't do much in this situation, will disable VF */ 11816988eb2aSSalil Mehta dev_err(&hdev->pdev->dev, 
11826988eb2aSSalil Mehta "VF failed(=%d) to fetch H/W reset completion status\n", 11836988eb2aSSalil Mehta ret); 11846988eb2aSSalil Mehta 11856988eb2aSSalil Mehta dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); 118629118ab9SHuazhong Tan rtnl_lock(); 11876988eb2aSSalil Mehta hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 11886988eb2aSSalil Mehta 11896988eb2aSSalil Mehta rtnl_unlock(); 11906988eb2aSSalil Mehta return ret; 11916988eb2aSSalil Mehta } 11926988eb2aSSalil Mehta 119329118ab9SHuazhong Tan rtnl_lock(); 119429118ab9SHuazhong Tan 11956988eb2aSSalil Mehta /* now, re-initialize the nic client and ae device*/ 11966988eb2aSSalil Mehta ret = hclgevf_reset_stack(hdev); 11976988eb2aSSalil Mehta if (ret) 11986988eb2aSSalil Mehta dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 11996988eb2aSSalil Mehta 12006988eb2aSSalil Mehta /* bring up the nic to enable TX/RX again */ 12016988eb2aSSalil Mehta hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 12026988eb2aSSalil Mehta 12036988eb2aSSalil Mehta rtnl_unlock(); 12046988eb2aSSalil Mehta 12056988eb2aSSalil Mehta return ret; 12066988eb2aSSalil Mehta } 12076988eb2aSSalil Mehta 1208a8dedb65SSalil Mehta static int hclgevf_do_reset(struct hclgevf_dev *hdev) 1209a8dedb65SSalil Mehta { 1210a8dedb65SSalil Mehta int status; 1211a8dedb65SSalil Mehta u8 respmsg; 1212a8dedb65SSalil Mehta 1213a8dedb65SSalil Mehta status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 1214a8dedb65SSalil Mehta 0, false, &respmsg, sizeof(u8)); 1215a8dedb65SSalil Mehta if (status) 1216a8dedb65SSalil Mehta dev_err(&hdev->pdev->dev, 1217a8dedb65SSalil Mehta "VF reset request to PF failed(=%d)\n", status); 1218a8dedb65SSalil Mehta 1219a8dedb65SSalil Mehta return status; 1220a8dedb65SSalil Mehta } 1221a8dedb65SSalil Mehta 1222720bd583SHuazhong Tan static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1223720bd583SHuazhong Tan unsigned long *addr) 1224720bd583SHuazhong Tan { 1225720bd583SHuazhong Tan enum 
hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1226720bd583SHuazhong Tan 1227720bd583SHuazhong Tan if (test_bit(HNAE3_VF_RESET, addr)) { 1228720bd583SHuazhong Tan rst_level = HNAE3_VF_RESET; 1229720bd583SHuazhong Tan clear_bit(HNAE3_VF_RESET, addr); 1230720bd583SHuazhong Tan } 1231720bd583SHuazhong Tan 1232720bd583SHuazhong Tan return rst_level; 1233720bd583SHuazhong Tan } 1234720bd583SHuazhong Tan 12356ae4e733SShiju Jose static void hclgevf_reset_event(struct pci_dev *pdev, 12366ae4e733SShiju Jose struct hnae3_handle *handle) 12376d4c3981SSalil Mehta { 12386d4c3981SSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 12396d4c3981SSalil Mehta 12406d4c3981SSalil Mehta dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 12416d4c3981SSalil Mehta 1242720bd583SHuazhong Tan if (!hdev->default_reset_request) 1243720bd583SHuazhong Tan handle->reset_level = 1244720bd583SHuazhong Tan hclgevf_get_reset_level(hdev, 1245720bd583SHuazhong Tan &hdev->default_reset_request); 1246720bd583SHuazhong Tan else 12476d4c3981SSalil Mehta handle->reset_level = HNAE3_VF_RESET; 12486d4c3981SSalil Mehta 1249436667d2SSalil Mehta /* reset of this VF requested */ 1250436667d2SSalil Mehta set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1251436667d2SSalil Mehta hclgevf_reset_task_schedule(hdev); 12526d4c3981SSalil Mehta 12536d4c3981SSalil Mehta handle->last_reset_time = jiffies; 12546d4c3981SSalil Mehta } 12556d4c3981SSalil Mehta 1256720bd583SHuazhong Tan static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1257720bd583SHuazhong Tan enum hnae3_reset_type rst_type) 1258720bd583SHuazhong Tan { 1259720bd583SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 1260720bd583SHuazhong Tan 1261720bd583SHuazhong Tan set_bit(rst_type, &hdev->default_reset_request); 1262720bd583SHuazhong Tan } 1263720bd583SHuazhong Tan 1264e2cb1decSSalil Mehta static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1265e2cb1decSSalil Mehta { 1266e2cb1decSSalil 
Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1267e2cb1decSSalil Mehta 1268e2cb1decSSalil Mehta return hdev->fw_version; 1269e2cb1decSSalil Mehta } 1270e2cb1decSSalil Mehta 1271e2cb1decSSalil Mehta static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1272e2cb1decSSalil Mehta { 1273e2cb1decSSalil Mehta struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1274e2cb1decSSalil Mehta 1275e2cb1decSSalil Mehta vector->vector_irq = pci_irq_vector(hdev->pdev, 1276e2cb1decSSalil Mehta HCLGEVF_MISC_VECTOR_NUM); 1277e2cb1decSSalil Mehta vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1278e2cb1decSSalil Mehta /* vector status always valid for Vector 0 */ 1279e2cb1decSSalil Mehta hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1280e2cb1decSSalil Mehta hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1281e2cb1decSSalil Mehta 1282e2cb1decSSalil Mehta hdev->num_msi_left -= 1; 1283e2cb1decSSalil Mehta hdev->num_msi_used += 1; 1284e2cb1decSSalil Mehta } 1285e2cb1decSSalil Mehta 128635a1e503SSalil Mehta void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 128735a1e503SSalil Mehta { 128835a1e503SSalil Mehta if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && 128935a1e503SSalil Mehta !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { 129035a1e503SSalil Mehta set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 129135a1e503SSalil Mehta schedule_work(&hdev->rst_service_task); 129235a1e503SSalil Mehta } 129335a1e503SSalil Mehta } 129435a1e503SSalil Mehta 129507a0556aSSalil Mehta void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1296e2cb1decSSalil Mehta { 129707a0556aSSalil Mehta if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && 129807a0556aSSalil Mehta !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { 129907a0556aSSalil Mehta set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1300e2cb1decSSalil Mehta schedule_work(&hdev->mbx_service_task); 1301e2cb1decSSalil Mehta } 
130207a0556aSSalil Mehta } 1303e2cb1decSSalil Mehta 1304e2cb1decSSalil Mehta static void hclgevf_task_schedule(struct hclgevf_dev *hdev) 1305e2cb1decSSalil Mehta { 1306e2cb1decSSalil Mehta if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && 1307e2cb1decSSalil Mehta !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) 1308e2cb1decSSalil Mehta schedule_work(&hdev->service_task); 1309e2cb1decSSalil Mehta } 1310e2cb1decSSalil Mehta 1311436667d2SSalil Mehta static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) 1312436667d2SSalil Mehta { 131307a0556aSSalil Mehta /* if we have any pending mailbox event then schedule the mbx task */ 131407a0556aSSalil Mehta if (hdev->mbx_event_pending) 131507a0556aSSalil Mehta hclgevf_mbx_task_schedule(hdev); 131607a0556aSSalil Mehta 1317436667d2SSalil Mehta if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) 1318436667d2SSalil Mehta hclgevf_reset_task_schedule(hdev); 1319436667d2SSalil Mehta } 1320436667d2SSalil Mehta 1321e2cb1decSSalil Mehta static void hclgevf_service_timer(struct timer_list *t) 1322e2cb1decSSalil Mehta { 1323e2cb1decSSalil Mehta struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); 1324e2cb1decSSalil Mehta 1325e2cb1decSSalil Mehta mod_timer(&hdev->service_timer, jiffies + 5 * HZ); 1326e2cb1decSSalil Mehta 1327e2cb1decSSalil Mehta hclgevf_task_schedule(hdev); 1328e2cb1decSSalil Mehta } 1329e2cb1decSSalil Mehta 133035a1e503SSalil Mehta static void hclgevf_reset_service_task(struct work_struct *work) 133135a1e503SSalil Mehta { 133235a1e503SSalil Mehta struct hclgevf_dev *hdev = 133335a1e503SSalil Mehta container_of(work, struct hclgevf_dev, rst_service_task); 1334a8dedb65SSalil Mehta int ret; 133535a1e503SSalil Mehta 133635a1e503SSalil Mehta if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 133735a1e503SSalil Mehta return; 133835a1e503SSalil Mehta 133935a1e503SSalil Mehta clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 134035a1e503SSalil Mehta 1341436667d2SSalil 
Mehta if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 1342436667d2SSalil Mehta &hdev->reset_state)) { 1343436667d2SSalil Mehta /* PF has initmated that it is about to reset the hardware. 1344436667d2SSalil Mehta * We now have to poll & check if harware has actually completed 1345436667d2SSalil Mehta * the reset sequence. On hardware reset completion, VF needs to 1346436667d2SSalil Mehta * reset the client and ae device. 134735a1e503SSalil Mehta */ 1348436667d2SSalil Mehta hdev->reset_attempts = 0; 1349436667d2SSalil Mehta 13506988eb2aSSalil Mehta ret = hclgevf_reset(hdev); 13516988eb2aSSalil Mehta if (ret) 13526988eb2aSSalil Mehta dev_err(&hdev->pdev->dev, "VF stack reset failed.\n"); 1353436667d2SSalil Mehta } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1354436667d2SSalil Mehta &hdev->reset_state)) { 1355436667d2SSalil Mehta /* we could be here when either of below happens: 1356436667d2SSalil Mehta * 1. reset was initiated due to watchdog timeout due to 1357436667d2SSalil Mehta * a. IMP was earlier reset and our TX got choked down and 1358436667d2SSalil Mehta * which resulted in watchdog reacting and inducing VF 1359436667d2SSalil Mehta * reset. This also means our cmdq would be unreliable. 1360436667d2SSalil Mehta * b. problem in TX due to other lower layer(example link 1361436667d2SSalil Mehta * layer not functioning properly etc.) 1362436667d2SSalil Mehta * 2. VF reset might have been initiated due to some config 1363436667d2SSalil Mehta * change. 1364436667d2SSalil Mehta * 1365436667d2SSalil Mehta * NOTE: Theres no clear way to detect above cases than to react 1366436667d2SSalil Mehta * to the response of PF for this reset request. PF will ack the 1367436667d2SSalil Mehta * 1b and 2. cases but we will not get any intimation about 1a 1368436667d2SSalil Mehta * from PF as cmdq would be in unreliable state i.e. mailbox 1369436667d2SSalil Mehta * communication between PF and VF would be broken. 
/* Worker that drains asynchronous mailbox messages from the PF.  The
 * MBX_HANDLING bit provides mutual exclusion so only one drain runs at a
 * time; MBX_SERVICE_SCHED is cleared so the task can be re-queued.
 */
static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

/* Periodic service worker: polls the PF for link status and re-arms any
 * deferred (mailbox/reset) work.
 */
static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

/* Acknowledge vector0 event sources by writing @regclr back to the CMDQ
 * source register.
 */
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

/* Read the vector0 interrupt source register and report whether a mailbox
 * (CMDQ RX) event is pending.  On a mailbox event, *clearval is set to the
 * register value with the CMDQ RX bit masked off, for later write-back via
 * hclgevf_clear_event_cause().
 */
static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}
1447e2cb1decSSalil Mehta *clearval = cmdq_src_reg; 1448e2cb1decSSalil Mehta return true; 1449e2cb1decSSalil Mehta } 1450e2cb1decSSalil Mehta 1451e2cb1decSSalil Mehta dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); 1452e2cb1decSSalil Mehta 1453e2cb1decSSalil Mehta return false; 1454e2cb1decSSalil Mehta } 1455e2cb1decSSalil Mehta 1456e2cb1decSSalil Mehta static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1457e2cb1decSSalil Mehta { 1458e2cb1decSSalil Mehta writel(en ? 1 : 0, vector->addr); 1459e2cb1decSSalil Mehta } 1460e2cb1decSSalil Mehta 1461e2cb1decSSalil Mehta static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 1462e2cb1decSSalil Mehta { 1463e2cb1decSSalil Mehta struct hclgevf_dev *hdev = data; 1464e2cb1decSSalil Mehta u32 clearval; 1465e2cb1decSSalil Mehta 1466e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, false); 1467e2cb1decSSalil Mehta if (!hclgevf_check_event_cause(hdev, &clearval)) 1468e2cb1decSSalil Mehta goto skip_sched; 1469e2cb1decSSalil Mehta 147007a0556aSSalil Mehta hclgevf_mbx_handler(hdev); 1471e2cb1decSSalil Mehta 1472e2cb1decSSalil Mehta hclgevf_clear_event_cause(hdev, clearval); 1473e2cb1decSSalil Mehta 1474e2cb1decSSalil Mehta skip_sched: 1475e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, true); 1476e2cb1decSSalil Mehta 1477e2cb1decSSalil Mehta return IRQ_HANDLED; 1478e2cb1decSSalil Mehta } 1479e2cb1decSSalil Mehta 1480e2cb1decSSalil Mehta static int hclgevf_configure(struct hclgevf_dev *hdev) 1481e2cb1decSSalil Mehta { 1482e2cb1decSSalil Mehta int ret; 1483e2cb1decSSalil Mehta 1484c136b884SPeng Li hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE; 1485c136b884SPeng Li 1486e2cb1decSSalil Mehta /* get queue configuration from PF */ 14876cee6fc3SJian Shen ret = hclgevf_get_queue_info(hdev); 1488e2cb1decSSalil Mehta if (ret) 1489e2cb1decSSalil Mehta return ret; 1490e2cb1decSSalil Mehta /* get tc configuration from PF */ 1491e2cb1decSSalil Mehta return 
/* Allocate the per-device private structure (device-managed memory) and
 * link it with the ae device.  Returns 0 or -ENOMEM.
 */
static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	/* NOTE(review): this initializer is dead — hdev is unconditionally
	 * reassigned by the allocation below.
	 */
	struct hclgevf_dev *hdev = ae_dev->priv;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

/* Populate the RoCE handle's vector/base info from the NIC handle and the
 * device's RoCE MSI-X resources.  Fails with -EINVAL if not enough MSI
 * vectors remain for RoCE.
 */
static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

/* Program the initial RSS configuration.  On revision >= 0x21 hardware a
 * Toeplitz hash with a random key and per-protocol tuple sets is configured;
 * all revisions get an indirection table spreading queues round-robin and
 * the RSS TC mode.  Returns 0 or the first firmware error.
 */
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
		netdev_rss_key_fill(rss_cfg->rss_hash_key,
				    HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;

	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}
HCLGEVF_RSS_IND_TBL_SIZE; i++) 1575e2cb1decSSalil Mehta rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; 1576e2cb1decSSalil Mehta 1577e2cb1decSSalil Mehta ret = hclgevf_set_rss_indir_table(hdev); 1578e2cb1decSSalil Mehta if (ret) 1579e2cb1decSSalil Mehta return ret; 1580e2cb1decSSalil Mehta 1581e2cb1decSSalil Mehta return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); 1582e2cb1decSSalil Mehta } 1583e2cb1decSSalil Mehta 1584e2cb1decSSalil Mehta static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 1585e2cb1decSSalil Mehta { 1586e2cb1decSSalil Mehta /* other vlan config(like, VLAN TX/RX offload) would also be added 1587e2cb1decSSalil Mehta * here later 1588e2cb1decSSalil Mehta */ 1589e2cb1decSSalil Mehta return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 1590e2cb1decSSalil Mehta false); 1591e2cb1decSSalil Mehta } 1592e2cb1decSSalil Mehta 1593e2cb1decSSalil Mehta static int hclgevf_ae_start(struct hnae3_handle *handle) 1594e2cb1decSSalil Mehta { 1595b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1596e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1597e2cb1decSSalil Mehta int i, queue_id; 1598e2cb1decSSalil Mehta 1599b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 1600e2cb1decSSalil Mehta /* ring enable */ 1601b4f1d303SJian Shen queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); 1602e2cb1decSSalil Mehta if (queue_id < 0) { 1603e2cb1decSSalil Mehta dev_warn(&hdev->pdev->dev, 1604e2cb1decSSalil Mehta "Get invalid queue id, ignore it\n"); 1605e2cb1decSSalil Mehta continue; 1606e2cb1decSSalil Mehta } 1607e2cb1decSSalil Mehta 1608e2cb1decSSalil Mehta hclgevf_tqp_enable(hdev, queue_id, 0, true); 1609e2cb1decSSalil Mehta } 1610e2cb1decSSalil Mehta 1611e2cb1decSSalil Mehta /* reset tqp stats */ 1612e2cb1decSSalil Mehta hclgevf_reset_tqp_stats(handle); 1613e2cb1decSSalil Mehta 1614e2cb1decSSalil Mehta hclgevf_request_link_info(hdev); 1615e2cb1decSSalil Mehta 1616e2cb1decSSalil 
Mehta clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1617e2cb1decSSalil Mehta mod_timer(&hdev->service_timer, jiffies + HZ); 1618e2cb1decSSalil Mehta 1619e2cb1decSSalil Mehta return 0; 1620e2cb1decSSalil Mehta } 1621e2cb1decSSalil Mehta 1622e2cb1decSSalil Mehta static void hclgevf_ae_stop(struct hnae3_handle *handle) 1623e2cb1decSSalil Mehta { 1624b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1625e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1626e2cb1decSSalil Mehta int i, queue_id; 1627e2cb1decSSalil Mehta 16282f7e4896SFuyun Liang set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 16292f7e4896SFuyun Liang 1630b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 1631e2cb1decSSalil Mehta /* Ring disable */ 1632b4f1d303SJian Shen queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); 1633e2cb1decSSalil Mehta if (queue_id < 0) { 1634e2cb1decSSalil Mehta dev_warn(&hdev->pdev->dev, 1635e2cb1decSSalil Mehta "Get invalid queue id, ignore it\n"); 1636e2cb1decSSalil Mehta continue; 1637e2cb1decSSalil Mehta } 1638e2cb1decSSalil Mehta 1639e2cb1decSSalil Mehta hclgevf_tqp_enable(hdev, queue_id, 0, false); 1640e2cb1decSSalil Mehta } 1641e2cb1decSSalil Mehta 1642e2cb1decSSalil Mehta /* reset tqp stats */ 1643e2cb1decSSalil Mehta hclgevf_reset_tqp_stats(handle); 16448cc6c1f7SFuyun Liang del_timer_sync(&hdev->service_timer); 16458cc6c1f7SFuyun Liang cancel_work_sync(&hdev->service_task); 1646f5be7967SYunsheng Lin clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 16478cc6c1f7SFuyun Liang hclgevf_update_link_status(hdev, 0); 1648e2cb1decSSalil Mehta } 1649e2cb1decSSalil Mehta 1650e2cb1decSSalil Mehta static void hclgevf_state_init(struct hclgevf_dev *hdev) 1651e2cb1decSSalil Mehta { 16527a01c897SSalil Mehta /* if this is on going reset then skip this initialization */ 16537a01c897SSalil Mehta if (hclgevf_dev_ongoing_reset(hdev)) 16547a01c897SSalil Mehta return; 16557a01c897SSalil Mehta 1656e2cb1decSSalil Mehta /* setup tasks for 
/* Tear down the software state machine: mark the device DOWN, then flush
 * the timer and every worker before destroying the mailbox mutex.
 */
static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	/* each item is checked individually since it may never have been
	 * set up (e.g. after an early init failure)
	 */
	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

/* Allocate MSI/MSI-X vectors and the bookkeeping arrays that map vectors
 * to vports and IRQ numbers.  With RoCE support, MSI-X only is requested
 * and at least roce_base_msix_offset + 1 vectors are required; otherwise
 * MSI or MSI-X is acceptable.  Returns 0, -ENOMEM, or the PCI core error.
 */
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	/* if this is on going reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	/* record what was actually granted, not what was asked for */
	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	/* no vector is bound to a vport yet */
	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
i++) 1734e2cb1decSSalil Mehta hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 1735e2cb1decSSalil Mehta 1736e2cb1decSSalil Mehta hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 1737e2cb1decSSalil Mehta sizeof(int), GFP_KERNEL); 1738e2cb1decSSalil Mehta if (!hdev->vector_irq) { 1739e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 1740e2cb1decSSalil Mehta return -ENOMEM; 1741e2cb1decSSalil Mehta } 1742e2cb1decSSalil Mehta 1743e2cb1decSSalil Mehta return 0; 1744e2cb1decSSalil Mehta } 1745e2cb1decSSalil Mehta 1746e2cb1decSSalil Mehta static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 1747e2cb1decSSalil Mehta { 1748e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 1749e2cb1decSSalil Mehta 1750e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 1751e2cb1decSSalil Mehta } 1752e2cb1decSSalil Mehta 1753e2cb1decSSalil Mehta static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 1754e2cb1decSSalil Mehta { 1755e2cb1decSSalil Mehta int ret = 0; 1756e2cb1decSSalil Mehta 17577a01c897SSalil Mehta /* if this is on going reset then skip this initialization */ 17587a01c897SSalil Mehta if (hclgevf_dev_ongoing_reset(hdev)) 17597a01c897SSalil Mehta return 0; 17607a01c897SSalil Mehta 1761e2cb1decSSalil Mehta hclgevf_get_misc_vector(hdev); 1762e2cb1decSSalil Mehta 1763e2cb1decSSalil Mehta ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 1764e2cb1decSSalil Mehta 0, "hclgevf_cmd", hdev); 1765e2cb1decSSalil Mehta if (ret) { 1766e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 1767e2cb1decSSalil Mehta hdev->misc_vector.vector_irq); 1768e2cb1decSSalil Mehta return ret; 1769e2cb1decSSalil Mehta } 1770e2cb1decSSalil Mehta 17711819e409SXi Wang hclgevf_clear_event_cause(hdev, 0); 17721819e409SXi Wang 1773e2cb1decSSalil Mehta /* enable misc. 
/* Disable, quiesce and free the misc (vector0) interrupt.  The vector is
 * masked and in-flight handlers are waited for before the IRQ is freed.
 */
static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

/* Register a client (KNIC/UNIC/RoCE) with this ae device and initialize
 * its instance.  A RoCE instance is only initialized once both the NIC
 * and RoCE clients are present.  On failure the corresponding client
 * pointers are cleared via the error labels.  Returns 0, -EINVAL for an
 * unknown client type, or the client's init error.
 */
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		/* a RoCE client may have registered first; bring it up now
		 * that the NIC instance exists
		 */
		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		/* only init RoCE once the NIC client is also up */
		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

/* Unregister a client instance: RoCE is torn down first if present; the
 * NIC/UNIC instance is torn down unless this call came from the RoCE
 * client itself.
 */
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}
/* Enable and map the PCI device: enable it, set 64-bit DMA masks, claim
 * the regions, and iomap BAR 2 as the register window.  Resources acquired
 * so far are unwound via the goto chain on failure.
 */
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	/* check if we need to skip initialization of pci. This will happen if
	 * device is undergoing VF reset. Otherwise, we would need to
	 * re-initialize pci interface again i.e. when device is not going
	 * through *any* reset or actually undergoing full reset.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

/* Reverse hclgevf_pci_init(): unmap the register window, drop bus
 * mastering, release the regions and disable the device.
 */
static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
Mehta { 1939e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 1940e2cb1decSSalil Mehta 1941e2cb1decSSalil Mehta pci_iounmap(pdev, hdev->hw.io_base); 1942e2cb1decSSalil Mehta pci_clear_master(pdev); 1943e2cb1decSSalil Mehta pci_release_regions(pdev); 1944e2cb1decSSalil Mehta pci_disable_device(pdev); 1945e2cb1decSSalil Mehta } 1946e2cb1decSSalil Mehta 194707acf909SJian Shen static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 194807acf909SJian Shen { 194907acf909SJian Shen struct hclgevf_query_res_cmd *req; 195007acf909SJian Shen struct hclgevf_desc desc; 195107acf909SJian Shen int ret; 195207acf909SJian Shen 195307acf909SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 195407acf909SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 195507acf909SJian Shen if (ret) { 195607acf909SJian Shen dev_err(&hdev->pdev->dev, 195707acf909SJian Shen "query vf resource failed, ret = %d.\n", ret); 195807acf909SJian Shen return ret; 195907acf909SJian Shen } 196007acf909SJian Shen 196107acf909SJian Shen req = (struct hclgevf_query_res_cmd *)desc.data; 196207acf909SJian Shen 196307acf909SJian Shen if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { 196407acf909SJian Shen hdev->roce_base_msix_offset = 196507acf909SJian Shen hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), 196607acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_M, 196707acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_S); 196807acf909SJian Shen hdev->num_roce_msix = 196907acf909SJian Shen hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 197007acf909SJian Shen HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 197107acf909SJian Shen 197207acf909SJian Shen /* VF should have NIC vectors and Roce vectors, NIC vectors 197307acf909SJian Shen * are queued before Roce vectors. The offset is fixed to 64. 
/* Full device bring-up: PCI, command queue, resource query, MSI, state
 * machine, misc IRQ, configuration fetch, TQPs, handle info, RSS and VLAN.
 * Each failure unwinds everything initialized so far through the goto
 * chain.  Returns 0 or the first failing step's error.
 */
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	/* check if device is on-going full reset(i.e. pcie as well) */
	if (hclgevf_dev_ongoing_full_reset(hdev)) {
		dev_warn(&pdev->dev, "device is going full reset\n");
		hclgevf_uninit_hdev(hdev);
	}

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_query_vf;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_query_vf;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_query_vf:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

/* Full device teardown, in the reverse order of hclgevf_init_hdev(). */
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}

/* hnae3 framework entry point: allocate the private structure and run the
 * full device initialization.
 */
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
21087a01c897SSalil Mehta struct hclgevf_dev *hdev = ae_dev->priv; 21097a01c897SSalil Mehta 21107a01c897SSalil Mehta hclgevf_uninit_hdev(hdev); 2111e2cb1decSSalil Mehta ae_dev->priv = NULL; 2112e2cb1decSSalil Mehta } 2113e2cb1decSSalil Mehta 2114849e4607SPeng Li static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 2115849e4607SPeng Li { 2116849e4607SPeng Li struct hnae3_handle *nic = &hdev->nic; 2117849e4607SPeng Li struct hnae3_knic_private_info *kinfo = &nic->kinfo; 2118849e4607SPeng Li 2119849e4607SPeng Li return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 2120849e4607SPeng Li } 2121849e4607SPeng Li 2122849e4607SPeng Li /** 2123849e4607SPeng Li * hclgevf_get_channels - Get the current channels enabled and max supported. 2124849e4607SPeng Li * @handle: hardware information for network interface 2125849e4607SPeng Li * @ch: ethtool channels structure 2126849e4607SPeng Li * 2127849e4607SPeng Li * We don't support separate tx and rx queues as channels. The other count 2128849e4607SPeng Li * represents how many queues are being used for control. max_combined counts 2129849e4607SPeng Li * how many queue pairs we can support. They may not be mapped 1 to 1 with 2130849e4607SPeng Li * q_vectors since we support a lot more queue pairs than q_vectors. 
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	/* every TQP assigned to the VF is exposed as one combined channel */
	ch->combined_count = hdev->num_tqps;
}

/* Report the number of TQPs allocated to this VF and the maximum RSS
 * queue size, for ethtool ring/RSS sizing.
 */
static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

/* Return the cached MAC link state (presumably non-zero == link up —
 * hw.mac.link is maintained outside this view; confirm against the mailbox
 * link-status handler).
 */
static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

/* Fill in cached link settings for ethtool. Each output pointer is
 * optional (NULL-checked). A VF cannot negotiate, so autoneg is always
 * reported as disabled.
 */
static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

/* Cache the current speed/duplex in the VF's MAC state. Non-static:
 * called from another file (presumably the mailbox handler when the PF
 * reports a link change — confirm at the caller).
 */
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

/* Report the cached media type; @media_type is optional (NULL-checked). */
static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

/* Operations exported to the hnae3 framework; the client (net device)
 * layer drives the VF hardware exclusively through this table.
 */
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
};

/* Algorithm descriptor registered with hnae3: binds hclgevf_ops to the
 * PCI IDs in ae_algovf_pci_tbl (the VF device IDs, see top of file).
 */
static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

/* Module entry point: register the VF algorithm with the hnae3 framework. */
static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

/* Module exit point: unregister the VF algorithm from the framework. */
static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);