1e2cb1decSSalil Mehta // SPDX-License-Identifier: GPL-2.0+ 2e2cb1decSSalil Mehta // Copyright (c) 2016-2017 Hisilicon Limited. 3e2cb1decSSalil Mehta 4e2cb1decSSalil Mehta #include <linux/etherdevice.h> 56988eb2aSSalil Mehta #include <net/rtnetlink.h> 6e2cb1decSSalil Mehta #include "hclgevf_cmd.h" 7e2cb1decSSalil Mehta #include "hclgevf_main.h" 8e2cb1decSSalil Mehta #include "hclge_mbx.h" 9e2cb1decSSalil Mehta #include "hnae3.h" 10e2cb1decSSalil Mehta 11e2cb1decSSalil Mehta #define HCLGEVF_NAME "hclgevf" 12e2cb1decSSalil Mehta 139c6f7085SHuazhong Tan static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 14e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf; 15e2cb1decSSalil Mehta 16e2cb1decSSalil Mehta static const struct pci_device_id ae_algovf_pci_tbl[] = { 17e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 18e2cb1decSSalil Mehta {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 19e2cb1decSSalil Mehta /* required last entry */ 20e2cb1decSSalil Mehta {0, } 21e2cb1decSSalil Mehta }; 22e2cb1decSSalil Mehta 232f550a46SYunsheng Lin MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 242f550a46SYunsheng Lin 25e2cb1decSSalil Mehta static inline struct hclgevf_dev *hclgevf_ae_get_hdev( 26e2cb1decSSalil Mehta struct hnae3_handle *handle) 27e2cb1decSSalil Mehta { 28e2cb1decSSalil Mehta return container_of(handle, struct hclgevf_dev, nic); 29e2cb1decSSalil Mehta } 30e2cb1decSSalil Mehta 31e2cb1decSSalil Mehta static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 32e2cb1decSSalil Mehta { 33b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 34e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 35e2cb1decSSalil Mehta struct hclgevf_desc desc; 36e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 37e2cb1decSSalil Mehta int status; 38e2cb1decSSalil Mehta int i; 39e2cb1decSSalil Mehta 40b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 41b4f1d303SJian Shen tqp = 
container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 42e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, 43e2cb1decSSalil Mehta HCLGEVF_OPC_QUERY_RX_STATUS, 44e2cb1decSSalil Mehta true); 45e2cb1decSSalil Mehta 46e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 47e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 48e2cb1decSSalil Mehta if (status) { 49e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 50e2cb1decSSalil Mehta "Query tqp stat fail, status = %d,queue = %d\n", 51e2cb1decSSalil Mehta status, i); 52e2cb1decSSalil Mehta return status; 53e2cb1decSSalil Mehta } 54e2cb1decSSalil Mehta tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 55cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 56e2cb1decSSalil Mehta 57e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 58e2cb1decSSalil Mehta true); 59e2cb1decSSalil Mehta 60e2cb1decSSalil Mehta desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 61e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 62e2cb1decSSalil Mehta if (status) { 63e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 64e2cb1decSSalil Mehta "Query tqp stat fail, status = %d,queue = %d\n", 65e2cb1decSSalil Mehta status, i); 66e2cb1decSSalil Mehta return status; 67e2cb1decSSalil Mehta } 68e2cb1decSSalil Mehta tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 69cf72fa63SJian Shen le32_to_cpu(desc.data[1]); 70e2cb1decSSalil Mehta } 71e2cb1decSSalil Mehta 72e2cb1decSSalil Mehta return 0; 73e2cb1decSSalil Mehta } 74e2cb1decSSalil Mehta 75e2cb1decSSalil Mehta static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 76e2cb1decSSalil Mehta { 77e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo = &handle->kinfo; 78e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 79e2cb1decSSalil Mehta u64 *buff = data; 80e2cb1decSSalil Mehta int i; 81e2cb1decSSalil Mehta 82b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 83b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct 
hclgevf_tqp, q); 84e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 85e2cb1decSSalil Mehta } 86e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 87b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 88e2cb1decSSalil Mehta *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 89e2cb1decSSalil Mehta } 90e2cb1decSSalil Mehta 91e2cb1decSSalil Mehta return buff; 92e2cb1decSSalil Mehta } 93e2cb1decSSalil Mehta 94e2cb1decSSalil Mehta static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 95e2cb1decSSalil Mehta { 96b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 97e2cb1decSSalil Mehta 98b4f1d303SJian Shen return kinfo->num_tqps * 2; 99e2cb1decSSalil Mehta } 100e2cb1decSSalil Mehta 101e2cb1decSSalil Mehta static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 102e2cb1decSSalil Mehta { 103b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 104e2cb1decSSalil Mehta u8 *buff = data; 105e2cb1decSSalil Mehta int i = 0; 106e2cb1decSSalil Mehta 107b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 108b4f1d303SJian Shen struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 109e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1100c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 111e2cb1decSSalil Mehta tqp->index); 112e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 113e2cb1decSSalil Mehta } 114e2cb1decSSalil Mehta 115b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 116b4f1d303SJian Shen struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 117e2cb1decSSalil Mehta struct hclgevf_tqp, q); 1180c218123SJian Shen snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 119e2cb1decSSalil Mehta tqp->index); 120e2cb1decSSalil Mehta buff += ETH_GSTRING_LEN; 121e2cb1decSSalil Mehta } 122e2cb1decSSalil Mehta 123e2cb1decSSalil Mehta return buff; 124e2cb1decSSalil Mehta } 125e2cb1decSSalil Mehta 126e2cb1decSSalil Mehta static 
void hclgevf_update_stats(struct hnae3_handle *handle, 127e2cb1decSSalil Mehta struct net_device_stats *net_stats) 128e2cb1decSSalil Mehta { 129e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 130e2cb1decSSalil Mehta int status; 131e2cb1decSSalil Mehta 132e2cb1decSSalil Mehta status = hclgevf_tqps_update_stats(handle); 133e2cb1decSSalil Mehta if (status) 134e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 135e2cb1decSSalil Mehta "VF update of TQPS stats fail, status = %d.\n", 136e2cb1decSSalil Mehta status); 137e2cb1decSSalil Mehta } 138e2cb1decSSalil Mehta 139e2cb1decSSalil Mehta static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 140e2cb1decSSalil Mehta { 141e2cb1decSSalil Mehta if (strset == ETH_SS_TEST) 142e2cb1decSSalil Mehta return -EOPNOTSUPP; 143e2cb1decSSalil Mehta else if (strset == ETH_SS_STATS) 144e2cb1decSSalil Mehta return hclgevf_tqps_get_sset_count(handle, strset); 145e2cb1decSSalil Mehta 146e2cb1decSSalil Mehta return 0; 147e2cb1decSSalil Mehta } 148e2cb1decSSalil Mehta 149e2cb1decSSalil Mehta static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 150e2cb1decSSalil Mehta u8 *data) 151e2cb1decSSalil Mehta { 152e2cb1decSSalil Mehta u8 *p = (char *)data; 153e2cb1decSSalil Mehta 154e2cb1decSSalil Mehta if (strset == ETH_SS_STATS) 155e2cb1decSSalil Mehta p = hclgevf_tqps_get_strings(handle, p); 156e2cb1decSSalil Mehta } 157e2cb1decSSalil Mehta 158e2cb1decSSalil Mehta static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 159e2cb1decSSalil Mehta { 160e2cb1decSSalil Mehta hclgevf_tqps_get_stats(handle, data); 161e2cb1decSSalil Mehta } 162e2cb1decSSalil Mehta 163e2cb1decSSalil Mehta static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 164e2cb1decSSalil Mehta { 165e2cb1decSSalil Mehta u8 resp_msg; 166e2cb1decSSalil Mehta int status; 167e2cb1decSSalil Mehta 168e2cb1decSSalil Mehta status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 169e2cb1decSSalil Mehta 
true, &resp_msg, sizeof(u8)); 170e2cb1decSSalil Mehta if (status) { 171e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 172e2cb1decSSalil Mehta "VF request to get TC info from PF failed %d", 173e2cb1decSSalil Mehta status); 174e2cb1decSSalil Mehta return status; 175e2cb1decSSalil Mehta } 176e2cb1decSSalil Mehta 177e2cb1decSSalil Mehta hdev->hw_tc_map = resp_msg; 178e2cb1decSSalil Mehta 179e2cb1decSSalil Mehta return 0; 180e2cb1decSSalil Mehta } 181e2cb1decSSalil Mehta 1826cee6fc3SJian Shen static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 183e2cb1decSSalil Mehta { 184e2cb1decSSalil Mehta #define HCLGEVF_TQPS_RSS_INFO_LEN 8 185e2cb1decSSalil Mehta u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 186e2cb1decSSalil Mehta int status; 187e2cb1decSSalil Mehta 188e2cb1decSSalil Mehta status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 189e2cb1decSSalil Mehta true, resp_msg, 190e2cb1decSSalil Mehta HCLGEVF_TQPS_RSS_INFO_LEN); 191e2cb1decSSalil Mehta if (status) { 192e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 193e2cb1decSSalil Mehta "VF request to get tqp info from PF failed %d", 194e2cb1decSSalil Mehta status); 195e2cb1decSSalil Mehta return status; 196e2cb1decSSalil Mehta } 197e2cb1decSSalil Mehta 198e2cb1decSSalil Mehta memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 199e2cb1decSSalil Mehta memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 200e2cb1decSSalil Mehta memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); 201e2cb1decSSalil Mehta memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); 202e2cb1decSSalil Mehta 203e2cb1decSSalil Mehta return 0; 204e2cb1decSSalil Mehta } 205e2cb1decSSalil Mehta 206e2cb1decSSalil Mehta static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 207e2cb1decSSalil Mehta { 208e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 209e2cb1decSSalil Mehta int i; 210e2cb1decSSalil Mehta 211e2cb1decSSalil Mehta hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 212e2cb1decSSalil Mehta sizeof(struct hclgevf_tqp), 
GFP_KERNEL); 213e2cb1decSSalil Mehta if (!hdev->htqp) 214e2cb1decSSalil Mehta return -ENOMEM; 215e2cb1decSSalil Mehta 216e2cb1decSSalil Mehta tqp = hdev->htqp; 217e2cb1decSSalil Mehta 218e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; i++) { 219e2cb1decSSalil Mehta tqp->dev = &hdev->pdev->dev; 220e2cb1decSSalil Mehta tqp->index = i; 221e2cb1decSSalil Mehta 222e2cb1decSSalil Mehta tqp->q.ae_algo = &ae_algovf; 223e2cb1decSSalil Mehta tqp->q.buf_size = hdev->rx_buf_len; 224e2cb1decSSalil Mehta tqp->q.desc_num = hdev->num_desc; 225e2cb1decSSalil Mehta tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 226e2cb1decSSalil Mehta i * HCLGEVF_TQP_REG_SIZE; 227e2cb1decSSalil Mehta 228e2cb1decSSalil Mehta tqp++; 229e2cb1decSSalil Mehta } 230e2cb1decSSalil Mehta 231e2cb1decSSalil Mehta return 0; 232e2cb1decSSalil Mehta } 233e2cb1decSSalil Mehta 234e2cb1decSSalil Mehta static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 235e2cb1decSSalil Mehta { 236e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 237e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo; 238e2cb1decSSalil Mehta u16 new_tqps = hdev->num_tqps; 239e2cb1decSSalil Mehta int i; 240e2cb1decSSalil Mehta 241e2cb1decSSalil Mehta kinfo = &nic->kinfo; 242e2cb1decSSalil Mehta kinfo->num_tc = 0; 243e2cb1decSSalil Mehta kinfo->num_desc = hdev->num_desc; 244e2cb1decSSalil Mehta kinfo->rx_buf_len = hdev->rx_buf_len; 245e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 246e2cb1decSSalil Mehta if (hdev->hw_tc_map & BIT(i)) 247e2cb1decSSalil Mehta kinfo->num_tc++; 248e2cb1decSSalil Mehta 249e2cb1decSSalil Mehta kinfo->rss_size 250e2cb1decSSalil Mehta = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 251e2cb1decSSalil Mehta new_tqps = kinfo->rss_size * kinfo->num_tc; 252e2cb1decSSalil Mehta kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 253e2cb1decSSalil Mehta 254e2cb1decSSalil Mehta kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 255e2cb1decSSalil Mehta 
sizeof(struct hnae3_queue *), GFP_KERNEL); 256e2cb1decSSalil Mehta if (!kinfo->tqp) 257e2cb1decSSalil Mehta return -ENOMEM; 258e2cb1decSSalil Mehta 259e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 260e2cb1decSSalil Mehta hdev->htqp[i].q.handle = &hdev->nic; 261e2cb1decSSalil Mehta hdev->htqp[i].q.tqp_index = i; 262e2cb1decSSalil Mehta kinfo->tqp[i] = &hdev->htqp[i].q; 263e2cb1decSSalil Mehta } 264e2cb1decSSalil Mehta 265e2cb1decSSalil Mehta return 0; 266e2cb1decSSalil Mehta } 267e2cb1decSSalil Mehta 268e2cb1decSSalil Mehta static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 269e2cb1decSSalil Mehta { 270e2cb1decSSalil Mehta int status; 271e2cb1decSSalil Mehta u8 resp_msg; 272e2cb1decSSalil Mehta 273e2cb1decSSalil Mehta status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 274e2cb1decSSalil Mehta 0, false, &resp_msg, sizeof(u8)); 275e2cb1decSSalil Mehta if (status) 276e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 277e2cb1decSSalil Mehta "VF failed to fetch link status(%d) from PF", status); 278e2cb1decSSalil Mehta } 279e2cb1decSSalil Mehta 280e2cb1decSSalil Mehta void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 281e2cb1decSSalil Mehta { 282e2cb1decSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 283e2cb1decSSalil Mehta struct hnae3_client *client; 284e2cb1decSSalil Mehta 285e2cb1decSSalil Mehta client = handle->client; 286e2cb1decSSalil Mehta 287582d37bbSPeng Li link_state = 288582d37bbSPeng Li test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; 289582d37bbSPeng Li 290e2cb1decSSalil Mehta if (link_state != hdev->hw.mac.link) { 291e2cb1decSSalil Mehta client->ops->link_status_change(handle, !!link_state); 292e2cb1decSSalil Mehta hdev->hw.mac.link = link_state; 293e2cb1decSSalil Mehta } 294e2cb1decSSalil Mehta } 295e2cb1decSSalil Mehta 296e2cb1decSSalil Mehta static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 297e2cb1decSSalil Mehta { 298e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 299e2cb1decSSalil Mehta int ret; 300e2cb1decSSalil Mehta 301e2cb1decSSalil Mehta nic->ae_algo = &ae_algovf; 302e2cb1decSSalil Mehta nic->pdev = hdev->pdev; 303e2cb1decSSalil Mehta nic->numa_node_mask = hdev->numa_node_mask; 304424eb834SSalil Mehta nic->flags |= HNAE3_SUPPORT_VF; 305e2cb1decSSalil Mehta 306e2cb1decSSalil Mehta if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { 307e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "unsupported device type %d\n", 308e2cb1decSSalil Mehta hdev->ae_dev->dev_type); 309e2cb1decSSalil Mehta return -EINVAL; 310e2cb1decSSalil Mehta } 311e2cb1decSSalil Mehta 312e2cb1decSSalil Mehta ret = hclgevf_knic_setup(hdev); 313e2cb1decSSalil Mehta if (ret) 314e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 315e2cb1decSSalil Mehta ret); 316e2cb1decSSalil Mehta return ret; 317e2cb1decSSalil Mehta } 318e2cb1decSSalil Mehta 319e2cb1decSSalil Mehta static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 320e2cb1decSSalil Mehta { 32136cbbdf6SPeng Li if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 32236cbbdf6SPeng Li dev_warn(&hdev->pdev->dev, 32336cbbdf6SPeng Li "vector(vector_id %d) has been freed.\n", vector_id); 32436cbbdf6SPeng Li return; 32536cbbdf6SPeng Li } 32636cbbdf6SPeng Li 327e2cb1decSSalil Mehta hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 328e2cb1decSSalil Mehta hdev->num_msi_left += 1; 329e2cb1decSSalil Mehta hdev->num_msi_used -= 1; 330e2cb1decSSalil Mehta } 331e2cb1decSSalil Mehta 
332e2cb1decSSalil Mehta static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 333e2cb1decSSalil Mehta struct hnae3_vector_info *vector_info) 334e2cb1decSSalil Mehta { 335e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 336e2cb1decSSalil Mehta struct hnae3_vector_info *vector = vector_info; 337e2cb1decSSalil Mehta int alloc = 0; 338e2cb1decSSalil Mehta int i, j; 339e2cb1decSSalil Mehta 340e2cb1decSSalil Mehta vector_num = min(hdev->num_msi_left, vector_num); 341e2cb1decSSalil Mehta 342e2cb1decSSalil Mehta for (j = 0; j < vector_num; j++) { 343e2cb1decSSalil Mehta for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 344e2cb1decSSalil Mehta if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 345e2cb1decSSalil Mehta vector->vector = pci_irq_vector(hdev->pdev, i); 346e2cb1decSSalil Mehta vector->io_addr = hdev->hw.io_base + 347e2cb1decSSalil Mehta HCLGEVF_VECTOR_REG_BASE + 348e2cb1decSSalil Mehta (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 349e2cb1decSSalil Mehta hdev->vector_status[i] = 0; 350e2cb1decSSalil Mehta hdev->vector_irq[i] = vector->vector; 351e2cb1decSSalil Mehta 352e2cb1decSSalil Mehta vector++; 353e2cb1decSSalil Mehta alloc++; 354e2cb1decSSalil Mehta 355e2cb1decSSalil Mehta break; 356e2cb1decSSalil Mehta } 357e2cb1decSSalil Mehta } 358e2cb1decSSalil Mehta } 359e2cb1decSSalil Mehta hdev->num_msi_left -= alloc; 360e2cb1decSSalil Mehta hdev->num_msi_used += alloc; 361e2cb1decSSalil Mehta 362e2cb1decSSalil Mehta return alloc; 363e2cb1decSSalil Mehta } 364e2cb1decSSalil Mehta 365e2cb1decSSalil Mehta static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 366e2cb1decSSalil Mehta { 367e2cb1decSSalil Mehta int i; 368e2cb1decSSalil Mehta 369e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++) 370e2cb1decSSalil Mehta if (vector == hdev->vector_irq[i]) 371e2cb1decSSalil Mehta return i; 372e2cb1decSSalil Mehta 373e2cb1decSSalil Mehta return -EINVAL; 374e2cb1decSSalil Mehta } 
375e2cb1decSSalil Mehta 376374ad291SJian Shen static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 377374ad291SJian Shen const u8 hfunc, const u8 *key) 378374ad291SJian Shen { 379374ad291SJian Shen struct hclgevf_rss_config_cmd *req; 380374ad291SJian Shen struct hclgevf_desc desc; 381374ad291SJian Shen int key_offset; 382374ad291SJian Shen int key_size; 383374ad291SJian Shen int ret; 384374ad291SJian Shen 385374ad291SJian Shen req = (struct hclgevf_rss_config_cmd *)desc.data; 386374ad291SJian Shen 387374ad291SJian Shen for (key_offset = 0; key_offset < 3; key_offset++) { 388374ad291SJian Shen hclgevf_cmd_setup_basic_desc(&desc, 389374ad291SJian Shen HCLGEVF_OPC_RSS_GENERIC_CONFIG, 390374ad291SJian Shen false); 391374ad291SJian Shen 392374ad291SJian Shen req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 393374ad291SJian Shen req->hash_config |= 394374ad291SJian Shen (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 395374ad291SJian Shen 396374ad291SJian Shen if (key_offset == 2) 397374ad291SJian Shen key_size = 398374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; 399374ad291SJian Shen else 400374ad291SJian Shen key_size = HCLGEVF_RSS_HASH_KEY_NUM; 401374ad291SJian Shen 402374ad291SJian Shen memcpy(req->hash_key, 403374ad291SJian Shen key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 404374ad291SJian Shen 405374ad291SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 406374ad291SJian Shen if (ret) { 407374ad291SJian Shen dev_err(&hdev->pdev->dev, 408374ad291SJian Shen "Configure RSS config fail, status = %d\n", 409374ad291SJian Shen ret); 410374ad291SJian Shen return ret; 411374ad291SJian Shen } 412374ad291SJian Shen } 413374ad291SJian Shen 414374ad291SJian Shen return 0; 415374ad291SJian Shen } 416374ad291SJian Shen 417e2cb1decSSalil Mehta static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 418e2cb1decSSalil Mehta { 419e2cb1decSSalil Mehta return HCLGEVF_RSS_KEY_SIZE; 420e2cb1decSSalil Mehta } 
421e2cb1decSSalil Mehta 422e2cb1decSSalil Mehta static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 423e2cb1decSSalil Mehta { 424e2cb1decSSalil Mehta return HCLGEVF_RSS_IND_TBL_SIZE; 425e2cb1decSSalil Mehta } 426e2cb1decSSalil Mehta 427e2cb1decSSalil Mehta static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 428e2cb1decSSalil Mehta { 429e2cb1decSSalil Mehta const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 430e2cb1decSSalil Mehta struct hclgevf_rss_indirection_table_cmd *req; 431e2cb1decSSalil Mehta struct hclgevf_desc desc; 432e2cb1decSSalil Mehta int status; 433e2cb1decSSalil Mehta int i, j; 434e2cb1decSSalil Mehta 435e2cb1decSSalil Mehta req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 436e2cb1decSSalil Mehta 437e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 438e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 439e2cb1decSSalil Mehta false); 440e2cb1decSSalil Mehta req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 441e2cb1decSSalil Mehta req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 442e2cb1decSSalil Mehta for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 443e2cb1decSSalil Mehta req->rss_result[j] = 444e2cb1decSSalil Mehta indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 445e2cb1decSSalil Mehta 446e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 447e2cb1decSSalil Mehta if (status) { 448e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 449e2cb1decSSalil Mehta "VF failed(=%d) to set RSS indirection table\n", 450e2cb1decSSalil Mehta status); 451e2cb1decSSalil Mehta return status; 452e2cb1decSSalil Mehta } 453e2cb1decSSalil Mehta } 454e2cb1decSSalil Mehta 455e2cb1decSSalil Mehta return 0; 456e2cb1decSSalil Mehta } 457e2cb1decSSalil Mehta 458e2cb1decSSalil Mehta static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 459e2cb1decSSalil Mehta { 460e2cb1decSSalil Mehta struct hclgevf_rss_tc_mode_cmd *req; 461e2cb1decSSalil Mehta u16 
tc_offset[HCLGEVF_MAX_TC_NUM]; 462e2cb1decSSalil Mehta u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 463e2cb1decSSalil Mehta u16 tc_size[HCLGEVF_MAX_TC_NUM]; 464e2cb1decSSalil Mehta struct hclgevf_desc desc; 465e2cb1decSSalil Mehta u16 roundup_size; 466e2cb1decSSalil Mehta int status; 467e2cb1decSSalil Mehta int i; 468e2cb1decSSalil Mehta 469e2cb1decSSalil Mehta req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 470e2cb1decSSalil Mehta 471e2cb1decSSalil Mehta roundup_size = roundup_pow_of_two(rss_size); 472e2cb1decSSalil Mehta roundup_size = ilog2(roundup_size); 473e2cb1decSSalil Mehta 474e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 475e2cb1decSSalil Mehta tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 476e2cb1decSSalil Mehta tc_size[i] = roundup_size; 477e2cb1decSSalil Mehta tc_offset[i] = rss_size * i; 478e2cb1decSSalil Mehta } 479e2cb1decSSalil Mehta 480e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 481e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 482e4e87715SPeng Li hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 483e2cb1decSSalil Mehta (tc_valid[i] & 0x1)); 484e4e87715SPeng Li hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 485e2cb1decSSalil Mehta HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 486e4e87715SPeng Li hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 487e2cb1decSSalil Mehta HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 488e2cb1decSSalil Mehta } 489e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 490e2cb1decSSalil Mehta if (status) 491e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 492e2cb1decSSalil Mehta "VF failed(=%d) to set rss tc mode\n", status); 493e2cb1decSSalil Mehta 494e2cb1decSSalil Mehta return status; 495e2cb1decSSalil Mehta } 496e2cb1decSSalil Mehta 497e2cb1decSSalil Mehta static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 498e2cb1decSSalil Mehta u8 *hfunc) 499e2cb1decSSalil Mehta { 500e2cb1decSSalil Mehta 
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 501e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 502e2cb1decSSalil Mehta int i; 503e2cb1decSSalil Mehta 504374ad291SJian Shen if (handle->pdev->revision >= 0x21) { 505374ad291SJian Shen /* Get hash algorithm */ 506374ad291SJian Shen if (hfunc) { 507374ad291SJian Shen switch (rss_cfg->hash_algo) { 508374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 509374ad291SJian Shen *hfunc = ETH_RSS_HASH_TOP; 510374ad291SJian Shen break; 511374ad291SJian Shen case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 512374ad291SJian Shen *hfunc = ETH_RSS_HASH_XOR; 513374ad291SJian Shen break; 514374ad291SJian Shen default: 515374ad291SJian Shen *hfunc = ETH_RSS_HASH_UNKNOWN; 516374ad291SJian Shen break; 517374ad291SJian Shen } 518374ad291SJian Shen } 519374ad291SJian Shen 520374ad291SJian Shen /* Get the RSS Key required by the user */ 521374ad291SJian Shen if (key) 522374ad291SJian Shen memcpy(key, rss_cfg->rss_hash_key, 523374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 524374ad291SJian Shen } 525374ad291SJian Shen 526e2cb1decSSalil Mehta if (indir) 527e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 528e2cb1decSSalil Mehta indir[i] = rss_cfg->rss_indirection_tbl[i]; 529e2cb1decSSalil Mehta 530374ad291SJian Shen return 0; 531e2cb1decSSalil Mehta } 532e2cb1decSSalil Mehta 533e2cb1decSSalil Mehta static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 534e2cb1decSSalil Mehta const u8 *key, const u8 hfunc) 535e2cb1decSSalil Mehta { 536e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 537e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 538374ad291SJian Shen int ret, i; 539374ad291SJian Shen 540374ad291SJian Shen if (handle->pdev->revision >= 0x21) { 541374ad291SJian Shen /* Set the RSS Hash Key if specififed by the user */ 542374ad291SJian Shen if (key) { 543374ad291SJian Shen switch (hfunc) { 544374ad291SJian Shen case ETH_RSS_HASH_TOP: 
545374ad291SJian Shen rss_cfg->hash_algo = 546374ad291SJian Shen HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 547374ad291SJian Shen break; 548374ad291SJian Shen case ETH_RSS_HASH_XOR: 549374ad291SJian Shen rss_cfg->hash_algo = 550374ad291SJian Shen HCLGEVF_RSS_HASH_ALGO_SIMPLE; 551374ad291SJian Shen break; 552374ad291SJian Shen case ETH_RSS_HASH_NO_CHANGE: 553374ad291SJian Shen break; 554374ad291SJian Shen default: 555374ad291SJian Shen return -EINVAL; 556374ad291SJian Shen } 557374ad291SJian Shen 558374ad291SJian Shen ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 559374ad291SJian Shen key); 560374ad291SJian Shen if (ret) 561374ad291SJian Shen return ret; 562374ad291SJian Shen 563374ad291SJian Shen /* Update the shadow RSS key with user specified qids */ 564374ad291SJian Shen memcpy(rss_cfg->rss_hash_key, key, 565374ad291SJian Shen HCLGEVF_RSS_KEY_SIZE); 566374ad291SJian Shen } 567374ad291SJian Shen } 568e2cb1decSSalil Mehta 569e2cb1decSSalil Mehta /* update the shadow RSS table with user specified qids */ 570e2cb1decSSalil Mehta for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 571e2cb1decSSalil Mehta rss_cfg->rss_indirection_tbl[i] = indir[i]; 572e2cb1decSSalil Mehta 573e2cb1decSSalil Mehta /* update the hardware */ 574e2cb1decSSalil Mehta return hclgevf_set_rss_indir_table(hdev); 575e2cb1decSSalil Mehta } 576e2cb1decSSalil Mehta 577d97b3072SJian Shen static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 578d97b3072SJian Shen { 579d97b3072SJian Shen u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGEVF_S_PORT_BIT : 0; 580d97b3072SJian Shen 581d97b3072SJian Shen if (nfc->data & RXH_L4_B_2_3) 582d97b3072SJian Shen hash_sets |= HCLGEVF_D_PORT_BIT; 583d97b3072SJian Shen else 584d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_PORT_BIT; 585d97b3072SJian Shen 586d97b3072SJian Shen if (nfc->data & RXH_IP_SRC) 587d97b3072SJian Shen hash_sets |= HCLGEVF_S_IP_BIT; 588d97b3072SJian Shen else 589d97b3072SJian Shen hash_sets &= ~HCLGEVF_S_IP_BIT; 590d97b3072SJian Shen 591d97b3072SJian Shen if (nfc->data & RXH_IP_DST) 592d97b3072SJian Shen hash_sets |= HCLGEVF_D_IP_BIT; 593d97b3072SJian Shen else 594d97b3072SJian Shen hash_sets &= ~HCLGEVF_D_IP_BIT; 595d97b3072SJian Shen 596d97b3072SJian Shen if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 597d97b3072SJian Shen hash_sets |= HCLGEVF_V_TAG_BIT; 598d97b3072SJian Shen 599d97b3072SJian Shen return hash_sets; 600d97b3072SJian Shen } 601d97b3072SJian Shen 602d97b3072SJian Shen static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 603d97b3072SJian Shen struct ethtool_rxnfc *nfc) 604d97b3072SJian Shen { 605d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 606d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 607d97b3072SJian Shen struct hclgevf_rss_input_tuple_cmd *req; 608d97b3072SJian Shen struct hclgevf_desc desc; 609d97b3072SJian Shen u8 tuple_sets; 610d97b3072SJian Shen int ret; 611d97b3072SJian Shen 612d97b3072SJian Shen if (handle->pdev->revision == 0x20) 613d97b3072SJian Shen return -EOPNOTSUPP; 614d97b3072SJian Shen 615d97b3072SJian Shen if (nfc->data & 616d97b3072SJian Shen ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 617d97b3072SJian Shen return -EINVAL; 618d97b3072SJian Shen 619d97b3072SJian Shen req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 620d97b3072SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 621d97b3072SJian Shen 622d97b3072SJian Shen req->ipv4_tcp_en = 
rss_cfg->rss_tuple_sets.ipv4_tcp_en; 623d97b3072SJian Shen req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 624d97b3072SJian Shen req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 625d97b3072SJian Shen req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 626d97b3072SJian Shen req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 627d97b3072SJian Shen req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 628d97b3072SJian Shen req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 629d97b3072SJian Shen req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 630d97b3072SJian Shen 631d97b3072SJian Shen tuple_sets = hclgevf_get_rss_hash_bits(nfc); 632d97b3072SJian Shen switch (nfc->flow_type) { 633d97b3072SJian Shen case TCP_V4_FLOW: 634d97b3072SJian Shen req->ipv4_tcp_en = tuple_sets; 635d97b3072SJian Shen break; 636d97b3072SJian Shen case TCP_V6_FLOW: 637d97b3072SJian Shen req->ipv6_tcp_en = tuple_sets; 638d97b3072SJian Shen break; 639d97b3072SJian Shen case UDP_V4_FLOW: 640d97b3072SJian Shen req->ipv4_udp_en = tuple_sets; 641d97b3072SJian Shen break; 642d97b3072SJian Shen case UDP_V6_FLOW: 643d97b3072SJian Shen req->ipv6_udp_en = tuple_sets; 644d97b3072SJian Shen break; 645d97b3072SJian Shen case SCTP_V4_FLOW: 646d97b3072SJian Shen req->ipv4_sctp_en = tuple_sets; 647d97b3072SJian Shen break; 648d97b3072SJian Shen case SCTP_V6_FLOW: 649d97b3072SJian Shen if ((nfc->data & RXH_L4_B_0_1) || 650d97b3072SJian Shen (nfc->data & RXH_L4_B_2_3)) 651d97b3072SJian Shen return -EINVAL; 652d97b3072SJian Shen 653d97b3072SJian Shen req->ipv6_sctp_en = tuple_sets; 654d97b3072SJian Shen break; 655d97b3072SJian Shen case IPV4_FLOW: 656d97b3072SJian Shen req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 657d97b3072SJian Shen break; 658d97b3072SJian Shen case IPV6_FLOW: 659d97b3072SJian Shen req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 660d97b3072SJian Shen break; 661d97b3072SJian Shen default: 662d97b3072SJian Shen return 
-EINVAL; 663d97b3072SJian Shen } 664d97b3072SJian Shen 665d97b3072SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 666d97b3072SJian Shen if (ret) { 667d97b3072SJian Shen dev_err(&hdev->pdev->dev, 668d97b3072SJian Shen "Set rss tuple fail, status = %d\n", ret); 669d97b3072SJian Shen return ret; 670d97b3072SJian Shen } 671d97b3072SJian Shen 672d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 673d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 674d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 675d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 676d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 677d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 678d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 679d97b3072SJian Shen rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 680d97b3072SJian Shen return 0; 681d97b3072SJian Shen } 682d97b3072SJian Shen 683d97b3072SJian Shen static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 684d97b3072SJian Shen struct ethtool_rxnfc *nfc) 685d97b3072SJian Shen { 686d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 687d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 688d97b3072SJian Shen u8 tuple_sets; 689d97b3072SJian Shen 690d97b3072SJian Shen if (handle->pdev->revision == 0x20) 691d97b3072SJian Shen return -EOPNOTSUPP; 692d97b3072SJian Shen 693d97b3072SJian Shen nfc->data = 0; 694d97b3072SJian Shen 695d97b3072SJian Shen switch (nfc->flow_type) { 696d97b3072SJian Shen case TCP_V4_FLOW: 697d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 698d97b3072SJian Shen break; 699d97b3072SJian Shen case UDP_V4_FLOW: 700d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 701d97b3072SJian Shen break; 702d97b3072SJian Shen case TCP_V6_FLOW: 
703d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 704d97b3072SJian Shen break; 705d97b3072SJian Shen case UDP_V6_FLOW: 706d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 707d97b3072SJian Shen break; 708d97b3072SJian Shen case SCTP_V4_FLOW: 709d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 710d97b3072SJian Shen break; 711d97b3072SJian Shen case SCTP_V6_FLOW: 712d97b3072SJian Shen tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 713d97b3072SJian Shen break; 714d97b3072SJian Shen case IPV4_FLOW: 715d97b3072SJian Shen case IPV6_FLOW: 716d97b3072SJian Shen tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 717d97b3072SJian Shen break; 718d97b3072SJian Shen default: 719d97b3072SJian Shen return -EINVAL; 720d97b3072SJian Shen } 721d97b3072SJian Shen 722d97b3072SJian Shen if (!tuple_sets) 723d97b3072SJian Shen return 0; 724d97b3072SJian Shen 725d97b3072SJian Shen if (tuple_sets & HCLGEVF_D_PORT_BIT) 726d97b3072SJian Shen nfc->data |= RXH_L4_B_2_3; 727d97b3072SJian Shen if (tuple_sets & HCLGEVF_S_PORT_BIT) 728d97b3072SJian Shen nfc->data |= RXH_L4_B_0_1; 729d97b3072SJian Shen if (tuple_sets & HCLGEVF_D_IP_BIT) 730d97b3072SJian Shen nfc->data |= RXH_IP_DST; 731d97b3072SJian Shen if (tuple_sets & HCLGEVF_S_IP_BIT) 732d97b3072SJian Shen nfc->data |= RXH_IP_SRC; 733d97b3072SJian Shen 734d97b3072SJian Shen return 0; 735d97b3072SJian Shen } 736d97b3072SJian Shen 737d97b3072SJian Shen static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 738d97b3072SJian Shen struct hclgevf_rss_cfg *rss_cfg) 739d97b3072SJian Shen { 740d97b3072SJian Shen struct hclgevf_rss_input_tuple_cmd *req; 741d97b3072SJian Shen struct hclgevf_desc desc; 742d97b3072SJian Shen int ret; 743d97b3072SJian Shen 744d97b3072SJian Shen hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 745d97b3072SJian Shen 746d97b3072SJian Shen req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 747d97b3072SJian Shen 748d97b3072SJian 
Shen req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 749d97b3072SJian Shen req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 750d97b3072SJian Shen req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 751d97b3072SJian Shen req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 752d97b3072SJian Shen req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 753d97b3072SJian Shen req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 754d97b3072SJian Shen req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 755d97b3072SJian Shen req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 756d97b3072SJian Shen 757d97b3072SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 758d97b3072SJian Shen if (ret) 759d97b3072SJian Shen dev_err(&hdev->pdev->dev, 760d97b3072SJian Shen "Configure rss input fail, status = %d\n", ret); 761d97b3072SJian Shen return ret; 762d97b3072SJian Shen } 763d97b3072SJian Shen 764e2cb1decSSalil Mehta static int hclgevf_get_tc_size(struct hnae3_handle *handle) 765e2cb1decSSalil Mehta { 766e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 767e2cb1decSSalil Mehta struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 768e2cb1decSSalil Mehta 769e2cb1decSSalil Mehta return rss_cfg->rss_size; 770e2cb1decSSalil Mehta } 771e2cb1decSSalil Mehta 772e2cb1decSSalil Mehta static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 773b204bc74SPeng Li int vector_id, 774e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 775e2cb1decSSalil Mehta { 776e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 777e2cb1decSSalil Mehta struct hnae3_ring_chain_node *node; 778e2cb1decSSalil Mehta struct hclge_mbx_vf_to_pf_cmd *req; 779e2cb1decSSalil Mehta struct hclgevf_desc desc; 780b204bc74SPeng Li int i = 0; 781e2cb1decSSalil Mehta int status; 782e2cb1decSSalil Mehta u8 type; 783e2cb1decSSalil Mehta 784e2cb1decSSalil Mehta req = (struct 
hclge_mbx_vf_to_pf_cmd *)desc.data; 785e2cb1decSSalil Mehta 786e2cb1decSSalil Mehta for (node = ring_chain; node; node = node->next) { 7875d02a58dSYunsheng Lin int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 7885d02a58dSYunsheng Lin HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 7895d02a58dSYunsheng Lin 7905d02a58dSYunsheng Lin if (i == 0) { 7915d02a58dSYunsheng Lin hclgevf_cmd_setup_basic_desc(&desc, 7925d02a58dSYunsheng Lin HCLGEVF_OPC_MBX_VF_TO_PF, 7935d02a58dSYunsheng Lin false); 7945d02a58dSYunsheng Lin type = en ? 7955d02a58dSYunsheng Lin HCLGE_MBX_MAP_RING_TO_VECTOR : 7965d02a58dSYunsheng Lin HCLGE_MBX_UNMAP_RING_TO_VECTOR; 7975d02a58dSYunsheng Lin req->msg[0] = type; 7985d02a58dSYunsheng Lin req->msg[1] = vector_id; 7995d02a58dSYunsheng Lin } 8005d02a58dSYunsheng Lin 8015d02a58dSYunsheng Lin req->msg[idx_offset] = 802e4e87715SPeng Li hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 8035d02a58dSYunsheng Lin req->msg[idx_offset + 1] = node->tqp_index; 804e4e87715SPeng Li req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, 80579eee410SFuyun Liang HNAE3_RING_GL_IDX_M, 80679eee410SFuyun Liang HNAE3_RING_GL_IDX_S); 80779eee410SFuyun Liang 8085d02a58dSYunsheng Lin i++; 8095d02a58dSYunsheng Lin if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 8105d02a58dSYunsheng Lin HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 8115d02a58dSYunsheng Lin HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 8125d02a58dSYunsheng Lin !node->next) { 813e2cb1decSSalil Mehta req->msg[2] = i; 814e2cb1decSSalil Mehta 815e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 816e2cb1decSSalil Mehta if (status) { 817e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 818e2cb1decSSalil Mehta "Map TQP fail, status is %d.\n", 819e2cb1decSSalil Mehta status); 820e2cb1decSSalil Mehta return status; 821e2cb1decSSalil Mehta } 822e2cb1decSSalil Mehta i = 0; 823e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, 824e2cb1decSSalil Mehta HCLGEVF_OPC_MBX_VF_TO_PF, 825e2cb1decSSalil Mehta false); 826e2cb1decSSalil Mehta 
req->msg[0] = type; 827e2cb1decSSalil Mehta req->msg[1] = vector_id; 828e2cb1decSSalil Mehta } 829e2cb1decSSalil Mehta } 830e2cb1decSSalil Mehta 831e2cb1decSSalil Mehta return 0; 832e2cb1decSSalil Mehta } 833e2cb1decSSalil Mehta 834e2cb1decSSalil Mehta static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 835e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 836e2cb1decSSalil Mehta { 837b204bc74SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 838b204bc74SPeng Li int vector_id; 839b204bc74SPeng Li 840b204bc74SPeng Li vector_id = hclgevf_get_vector_index(hdev, vector); 841b204bc74SPeng Li if (vector_id < 0) { 842b204bc74SPeng Li dev_err(&handle->pdev->dev, 843b204bc74SPeng Li "Get vector index fail. ret =%d\n", vector_id); 844b204bc74SPeng Li return vector_id; 845b204bc74SPeng Li } 846b204bc74SPeng Li 847b204bc74SPeng Li return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 848e2cb1decSSalil Mehta } 849e2cb1decSSalil Mehta 850e2cb1decSSalil Mehta static int hclgevf_unmap_ring_from_vector( 851e2cb1decSSalil Mehta struct hnae3_handle *handle, 852e2cb1decSSalil Mehta int vector, 853e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 854e2cb1decSSalil Mehta { 855e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 856e2cb1decSSalil Mehta int ret, vector_id; 857e2cb1decSSalil Mehta 858e2cb1decSSalil Mehta vector_id = hclgevf_get_vector_index(hdev, vector); 859e2cb1decSSalil Mehta if (vector_id < 0) { 860e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 861e2cb1decSSalil Mehta "Get vector index fail. ret =%d\n", vector_id); 862e2cb1decSSalil Mehta return vector_id; 863e2cb1decSSalil Mehta } 864e2cb1decSSalil Mehta 865b204bc74SPeng Li ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 8660d3e6631SYunsheng Lin if (ret) 867e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 868e2cb1decSSalil Mehta "Unmap ring from vector fail. 
vector=%d, ret =%d\n", 869e2cb1decSSalil Mehta vector_id, 870e2cb1decSSalil Mehta ret); 8710d3e6631SYunsheng Lin 872e2cb1decSSalil Mehta return ret; 873e2cb1decSSalil Mehta } 874e2cb1decSSalil Mehta 8750d3e6631SYunsheng Lin static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 8760d3e6631SYunsheng Lin { 8770d3e6631SYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 87803718db9SYunsheng Lin int vector_id; 8790d3e6631SYunsheng Lin 88003718db9SYunsheng Lin vector_id = hclgevf_get_vector_index(hdev, vector); 88103718db9SYunsheng Lin if (vector_id < 0) { 88203718db9SYunsheng Lin dev_err(&handle->pdev->dev, 88303718db9SYunsheng Lin "hclgevf_put_vector get vector index fail. ret =%d\n", 88403718db9SYunsheng Lin vector_id); 88503718db9SYunsheng Lin return vector_id; 88603718db9SYunsheng Lin } 88703718db9SYunsheng Lin 88803718db9SYunsheng Lin hclgevf_free_vector(hdev, vector_id); 889e2cb1decSSalil Mehta 890e2cb1decSSalil Mehta return 0; 891e2cb1decSSalil Mehta } 892e2cb1decSSalil Mehta 8933b75c3dfSPeng Li static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 8943b75c3dfSPeng Li bool en_uc_pmc, bool en_mc_pmc) 895e2cb1decSSalil Mehta { 896e2cb1decSSalil Mehta struct hclge_mbx_vf_to_pf_cmd *req; 897e2cb1decSSalil Mehta struct hclgevf_desc desc; 898e2cb1decSSalil Mehta int status; 899e2cb1decSSalil Mehta 900e2cb1decSSalil Mehta req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 901e2cb1decSSalil Mehta 902e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); 903e2cb1decSSalil Mehta req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; 9043b75c3dfSPeng Li req->msg[1] = en_uc_pmc ? 1 : 0; 9053b75c3dfSPeng Li req->msg[2] = en_mc_pmc ? 
1 : 0; 906e2cb1decSSalil Mehta 907e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 908e2cb1decSSalil Mehta if (status) 909e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 910e2cb1decSSalil Mehta "Set promisc mode fail, status is %d.\n", status); 911e2cb1decSSalil Mehta 912e2cb1decSSalil Mehta return status; 913e2cb1decSSalil Mehta } 914e2cb1decSSalil Mehta 9157fa6be4fSHuazhong Tan static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, 9163b75c3dfSPeng Li bool en_uc_pmc, bool en_mc_pmc) 917e2cb1decSSalil Mehta { 918e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 919e2cb1decSSalil Mehta 9207fa6be4fSHuazhong Tan return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); 921e2cb1decSSalil Mehta } 922e2cb1decSSalil Mehta 923e2cb1decSSalil Mehta static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, 924e2cb1decSSalil Mehta int stream_id, bool enable) 925e2cb1decSSalil Mehta { 926e2cb1decSSalil Mehta struct hclgevf_cfg_com_tqp_queue_cmd *req; 927e2cb1decSSalil Mehta struct hclgevf_desc desc; 928e2cb1decSSalil Mehta int status; 929e2cb1decSSalil Mehta 930e2cb1decSSalil Mehta req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 931e2cb1decSSalil Mehta 932e2cb1decSSalil Mehta hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 933e2cb1decSSalil Mehta false); 934e2cb1decSSalil Mehta req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 935e2cb1decSSalil Mehta req->stream_id = cpu_to_le16(stream_id); 936e2cb1decSSalil Mehta req->enable |= enable << HCLGEVF_TQP_ENABLE_B; 937e2cb1decSSalil Mehta 938e2cb1decSSalil Mehta status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 939e2cb1decSSalil Mehta if (status) 940e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 941e2cb1decSSalil Mehta "TQP enable fail, status =%d.\n", status); 942e2cb1decSSalil Mehta 943e2cb1decSSalil Mehta return status; 944e2cb1decSSalil Mehta } 945e2cb1decSSalil Mehta 946e2cb1decSSalil Mehta static void 
hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 947e2cb1decSSalil Mehta { 948b4f1d303SJian Shen struct hnae3_knic_private_info *kinfo = &handle->kinfo; 949e2cb1decSSalil Mehta struct hclgevf_tqp *tqp; 950e2cb1decSSalil Mehta int i; 951e2cb1decSSalil Mehta 952b4f1d303SJian Shen for (i = 0; i < kinfo->num_tqps; i++) { 953b4f1d303SJian Shen tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 954e2cb1decSSalil Mehta memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 955e2cb1decSSalil Mehta } 956e2cb1decSSalil Mehta } 957e2cb1decSSalil Mehta 958e2cb1decSSalil Mehta static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 959e2cb1decSSalil Mehta { 960e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 961e2cb1decSSalil Mehta 962e2cb1decSSalil Mehta ether_addr_copy(p, hdev->hw.mac.mac_addr); 963e2cb1decSSalil Mehta } 964e2cb1decSSalil Mehta 96559098055SFuyun Liang static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 96659098055SFuyun Liang bool is_first) 967e2cb1decSSalil Mehta { 968e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 969e2cb1decSSalil Mehta u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 970e2cb1decSSalil Mehta u8 *new_mac_addr = (u8 *)p; 971e2cb1decSSalil Mehta u8 msg_data[ETH_ALEN * 2]; 97259098055SFuyun Liang u16 subcode; 973e2cb1decSSalil Mehta int status; 974e2cb1decSSalil Mehta 975e2cb1decSSalil Mehta ether_addr_copy(msg_data, new_mac_addr); 976e2cb1decSSalil Mehta ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); 977e2cb1decSSalil Mehta 97859098055SFuyun Liang subcode = is_first ? 
HCLGE_MBX_MAC_VLAN_UC_ADD : 97959098055SFuyun Liang HCLGE_MBX_MAC_VLAN_UC_MODIFY; 98059098055SFuyun Liang 981e2cb1decSSalil Mehta status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 98259098055SFuyun Liang subcode, msg_data, ETH_ALEN * 2, 9832097fdefSJian Shen true, NULL, 0); 984e2cb1decSSalil Mehta if (!status) 985e2cb1decSSalil Mehta ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 986e2cb1decSSalil Mehta 987e2cb1decSSalil Mehta return status; 988e2cb1decSSalil Mehta } 989e2cb1decSSalil Mehta 990e2cb1decSSalil Mehta static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 991e2cb1decSSalil Mehta const unsigned char *addr) 992e2cb1decSSalil Mehta { 993e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 994e2cb1decSSalil Mehta 995e2cb1decSSalil Mehta return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 996e2cb1decSSalil Mehta HCLGE_MBX_MAC_VLAN_UC_ADD, 997e2cb1decSSalil Mehta addr, ETH_ALEN, false, NULL, 0); 998e2cb1decSSalil Mehta } 999e2cb1decSSalil Mehta 1000e2cb1decSSalil Mehta static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1001e2cb1decSSalil Mehta const unsigned char *addr) 1002e2cb1decSSalil Mehta { 1003e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1004e2cb1decSSalil Mehta 1005e2cb1decSSalil Mehta return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1006e2cb1decSSalil Mehta HCLGE_MBX_MAC_VLAN_UC_REMOVE, 1007e2cb1decSSalil Mehta addr, ETH_ALEN, false, NULL, 0); 1008e2cb1decSSalil Mehta } 1009e2cb1decSSalil Mehta 1010e2cb1decSSalil Mehta static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1011e2cb1decSSalil Mehta const unsigned char *addr) 1012e2cb1decSSalil Mehta { 1013e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1014e2cb1decSSalil Mehta 1015e2cb1decSSalil Mehta return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1016e2cb1decSSalil Mehta HCLGE_MBX_MAC_VLAN_MC_ADD, 1017e2cb1decSSalil Mehta addr, ETH_ALEN, false, NULL, 0); 
1018e2cb1decSSalil Mehta } 1019e2cb1decSSalil Mehta 1020e2cb1decSSalil Mehta static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1021e2cb1decSSalil Mehta const unsigned char *addr) 1022e2cb1decSSalil Mehta { 1023e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1024e2cb1decSSalil Mehta 1025e2cb1decSSalil Mehta return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1026e2cb1decSSalil Mehta HCLGE_MBX_MAC_VLAN_MC_REMOVE, 1027e2cb1decSSalil Mehta addr, ETH_ALEN, false, NULL, 0); 1028e2cb1decSSalil Mehta } 1029e2cb1decSSalil Mehta 1030e2cb1decSSalil Mehta static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1031e2cb1decSSalil Mehta __be16 proto, u16 vlan_id, 1032e2cb1decSSalil Mehta bool is_kill) 1033e2cb1decSSalil Mehta { 1034e2cb1decSSalil Mehta #define HCLGEVF_VLAN_MBX_MSG_LEN 5 1035e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1036e2cb1decSSalil Mehta u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; 1037e2cb1decSSalil Mehta 1038e2cb1decSSalil Mehta if (vlan_id > 4095) 1039e2cb1decSSalil Mehta return -EINVAL; 1040e2cb1decSSalil Mehta 1041e2cb1decSSalil Mehta if (proto != htons(ETH_P_8021Q)) 1042e2cb1decSSalil Mehta return -EPROTONOSUPPORT; 1043e2cb1decSSalil Mehta 1044e2cb1decSSalil Mehta msg_data[0] = is_kill; 1045e2cb1decSSalil Mehta memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 1046e2cb1decSSalil Mehta memcpy(&msg_data[3], &proto, sizeof(proto)); 1047e2cb1decSSalil Mehta return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1048e2cb1decSSalil Mehta HCLGE_MBX_VLAN_FILTER, msg_data, 1049e2cb1decSSalil Mehta HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); 1050e2cb1decSSalil Mehta } 1051e2cb1decSSalil Mehta 1052b2641e2aSYunsheng Lin static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1053b2641e2aSYunsheng Lin { 1054b2641e2aSYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1055b2641e2aSYunsheng Lin u8 msg_data; 1056b2641e2aSYunsheng Lin 
1057b2641e2aSYunsheng Lin msg_data = enable ? 1 : 0; 1058b2641e2aSYunsheng Lin return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1059b2641e2aSYunsheng Lin HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, 1060b2641e2aSYunsheng Lin 1, false, NULL, 0); 1061b2641e2aSYunsheng Lin } 1062b2641e2aSYunsheng Lin 10637fa6be4fSHuazhong Tan static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1064e2cb1decSSalil Mehta { 1065e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1066e2cb1decSSalil Mehta u8 msg_data[2]; 10671a426f8bSPeng Li int ret; 1068e2cb1decSSalil Mehta 1069e2cb1decSSalil Mehta memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 1070e2cb1decSSalil Mehta 10711a426f8bSPeng Li /* disable vf queue before send queue reset msg to PF */ 10721a426f8bSPeng Li ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 10731a426f8bSPeng Li if (ret) 10747fa6be4fSHuazhong Tan return ret; 10751a426f8bSPeng Li 10767fa6be4fSHuazhong Tan return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 10771a426f8bSPeng Li 2, true, NULL, 0); 1078e2cb1decSSalil Mehta } 1079e2cb1decSSalil Mehta 10806988eb2aSSalil Mehta static int hclgevf_notify_client(struct hclgevf_dev *hdev, 10816988eb2aSSalil Mehta enum hnae3_reset_notify_type type) 10826988eb2aSSalil Mehta { 10836988eb2aSSalil Mehta struct hnae3_client *client = hdev->nic_client; 10846988eb2aSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 10856988eb2aSSalil Mehta 10866988eb2aSSalil Mehta if (!client->ops->reset_notify) 10876988eb2aSSalil Mehta return -EOPNOTSUPP; 10886988eb2aSSalil Mehta 10896988eb2aSSalil Mehta return client->ops->reset_notify(handle, type); 10906988eb2aSSalil Mehta } 10916988eb2aSSalil Mehta 10926988eb2aSSalil Mehta static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 10936988eb2aSSalil Mehta { 10946988eb2aSSalil Mehta #define HCLGEVF_RESET_WAIT_MS 500 10956988eb2aSSalil Mehta #define HCLGEVF_RESET_WAIT_CNT 20 10966988eb2aSSalil Mehta u32 val, cnt = 0; 
10976988eb2aSSalil Mehta 10986988eb2aSSalil Mehta /* wait to check the hardware reset completion status */ 10996988eb2aSSalil Mehta val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); 1100e4e87715SPeng Li while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && 11016988eb2aSSalil Mehta (cnt < HCLGEVF_RESET_WAIT_CNT)) { 11026988eb2aSSalil Mehta msleep(HCLGEVF_RESET_WAIT_MS); 11036988eb2aSSalil Mehta val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); 11046988eb2aSSalil Mehta cnt++; 11056988eb2aSSalil Mehta } 11066988eb2aSSalil Mehta 11076988eb2aSSalil Mehta /* hardware completion status should be available by this time */ 11086988eb2aSSalil Mehta if (cnt >= HCLGEVF_RESET_WAIT_CNT) { 11096988eb2aSSalil Mehta dev_warn(&hdev->pdev->dev, 11106988eb2aSSalil Mehta "could'nt get reset done status from h/w, timeout!\n"); 11116988eb2aSSalil Mehta return -EBUSY; 11126988eb2aSSalil Mehta } 11136988eb2aSSalil Mehta 11146988eb2aSSalil Mehta /* we will wait a bit more to let reset of the stack to complete. This 11156988eb2aSSalil Mehta * might happen in case reset assertion was made by PF. Yes, this also 11166988eb2aSSalil Mehta * means we might end up waiting bit more even for VF reset. 
11176988eb2aSSalil Mehta */ 11186988eb2aSSalil Mehta msleep(5000); 11196988eb2aSSalil Mehta 11206988eb2aSSalil Mehta return 0; 11216988eb2aSSalil Mehta } 11226988eb2aSSalil Mehta 11236988eb2aSSalil Mehta static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 11246988eb2aSSalil Mehta { 11257a01c897SSalil Mehta int ret; 11267a01c897SSalil Mehta 11276988eb2aSSalil Mehta /* uninitialize the nic client */ 11286988eb2aSSalil Mehta hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 11296988eb2aSSalil Mehta 11307a01c897SSalil Mehta /* re-initialize the hclge device */ 11319c6f7085SHuazhong Tan ret = hclgevf_reset_hdev(hdev); 11327a01c897SSalil Mehta if (ret) { 11337a01c897SSalil Mehta dev_err(&hdev->pdev->dev, 11347a01c897SSalil Mehta "hclge device re-init failed, VF is disabled!\n"); 11357a01c897SSalil Mehta return ret; 11367a01c897SSalil Mehta } 11376988eb2aSSalil Mehta 11386988eb2aSSalil Mehta /* bring up the nic client again */ 11396988eb2aSSalil Mehta hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 11406988eb2aSSalil Mehta 11416988eb2aSSalil Mehta return 0; 11426988eb2aSSalil Mehta } 11436988eb2aSSalil Mehta 11446988eb2aSSalil Mehta static int hclgevf_reset(struct hclgevf_dev *hdev) 11456988eb2aSSalil Mehta { 11466988eb2aSSalil Mehta int ret; 11476988eb2aSSalil Mehta 11484d60291bSHuazhong Tan hdev->reset_count++; 11496988eb2aSSalil Mehta rtnl_lock(); 11506988eb2aSSalil Mehta 11516988eb2aSSalil Mehta /* bring down the nic to stop any ongoing TX/RX */ 11526988eb2aSSalil Mehta hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 11536988eb2aSSalil Mehta 115429118ab9SHuazhong Tan rtnl_unlock(); 115529118ab9SHuazhong Tan 11566988eb2aSSalil Mehta /* check if VF could successfully fetch the hardware reset completion 11576988eb2aSSalil Mehta * status from the hardware 11586988eb2aSSalil Mehta */ 11596988eb2aSSalil Mehta ret = hclgevf_reset_wait(hdev); 11606988eb2aSSalil Mehta if (ret) { 11616988eb2aSSalil Mehta /* can't do much in this situation, will disable VF */ 
11626988eb2aSSalil Mehta dev_err(&hdev->pdev->dev, 11636988eb2aSSalil Mehta "VF failed(=%d) to fetch H/W reset completion status\n", 11646988eb2aSSalil Mehta ret); 11656988eb2aSSalil Mehta 11666988eb2aSSalil Mehta dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); 116729118ab9SHuazhong Tan rtnl_lock(); 11686988eb2aSSalil Mehta hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 11696988eb2aSSalil Mehta 11706988eb2aSSalil Mehta rtnl_unlock(); 11716988eb2aSSalil Mehta return ret; 11726988eb2aSSalil Mehta } 11736988eb2aSSalil Mehta 117429118ab9SHuazhong Tan rtnl_lock(); 117529118ab9SHuazhong Tan 11766988eb2aSSalil Mehta /* now, re-initialize the nic client and ae device*/ 11776988eb2aSSalil Mehta ret = hclgevf_reset_stack(hdev); 11786988eb2aSSalil Mehta if (ret) 11796988eb2aSSalil Mehta dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 11806988eb2aSSalil Mehta 11816988eb2aSSalil Mehta /* bring up the nic to enable TX/RX again */ 11826988eb2aSSalil Mehta hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 11836988eb2aSSalil Mehta 11846988eb2aSSalil Mehta rtnl_unlock(); 11856988eb2aSSalil Mehta 11866988eb2aSSalil Mehta return ret; 11876988eb2aSSalil Mehta } 11886988eb2aSSalil Mehta 1189a8dedb65SSalil Mehta static int hclgevf_do_reset(struct hclgevf_dev *hdev) 1190a8dedb65SSalil Mehta { 1191a8dedb65SSalil Mehta int status; 1192a8dedb65SSalil Mehta u8 respmsg; 1193a8dedb65SSalil Mehta 1194a8dedb65SSalil Mehta status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 1195a8dedb65SSalil Mehta 0, false, &respmsg, sizeof(u8)); 1196a8dedb65SSalil Mehta if (status) 1197a8dedb65SSalil Mehta dev_err(&hdev->pdev->dev, 1198a8dedb65SSalil Mehta "VF reset request to PF failed(=%d)\n", status); 1199a8dedb65SSalil Mehta 1200a8dedb65SSalil Mehta return status; 1201a8dedb65SSalil Mehta } 1202a8dedb65SSalil Mehta 1203720bd583SHuazhong Tan static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1204720bd583SHuazhong Tan unsigned long *addr) 
1205720bd583SHuazhong Tan { 1206720bd583SHuazhong Tan enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1207720bd583SHuazhong Tan 1208720bd583SHuazhong Tan if (test_bit(HNAE3_VF_RESET, addr)) { 1209720bd583SHuazhong Tan rst_level = HNAE3_VF_RESET; 1210720bd583SHuazhong Tan clear_bit(HNAE3_VF_RESET, addr); 1211720bd583SHuazhong Tan } 1212720bd583SHuazhong Tan 1213720bd583SHuazhong Tan return rst_level; 1214720bd583SHuazhong Tan } 1215720bd583SHuazhong Tan 12166ae4e733SShiju Jose static void hclgevf_reset_event(struct pci_dev *pdev, 12176ae4e733SShiju Jose struct hnae3_handle *handle) 12186d4c3981SSalil Mehta { 12196d4c3981SSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 12206d4c3981SSalil Mehta 12216d4c3981SSalil Mehta dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 12226d4c3981SSalil Mehta 1223720bd583SHuazhong Tan if (!hdev->default_reset_request) 12240742ed7cSHuazhong Tan hdev->reset_level = 1225720bd583SHuazhong Tan hclgevf_get_reset_level(hdev, 1226720bd583SHuazhong Tan &hdev->default_reset_request); 1227720bd583SHuazhong Tan else 12280742ed7cSHuazhong Tan hdev->reset_level = HNAE3_VF_RESET; 12296d4c3981SSalil Mehta 1230436667d2SSalil Mehta /* reset of this VF requested */ 1231436667d2SSalil Mehta set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1232436667d2SSalil Mehta hclgevf_reset_task_schedule(hdev); 12336d4c3981SSalil Mehta 12340742ed7cSHuazhong Tan hdev->last_reset_time = jiffies; 12356d4c3981SSalil Mehta } 12366d4c3981SSalil Mehta 1237720bd583SHuazhong Tan static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1238720bd583SHuazhong Tan enum hnae3_reset_type rst_type) 1239720bd583SHuazhong Tan { 1240720bd583SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 1241720bd583SHuazhong Tan 1242720bd583SHuazhong Tan set_bit(rst_type, &hdev->default_reset_request); 1243720bd583SHuazhong Tan } 1244720bd583SHuazhong Tan 1245e2cb1decSSalil Mehta static u32 hclgevf_get_fw_version(struct hnae3_handle 
*handle) 1246e2cb1decSSalil Mehta { 1247e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1248e2cb1decSSalil Mehta 1249e2cb1decSSalil Mehta return hdev->fw_version; 1250e2cb1decSSalil Mehta } 1251e2cb1decSSalil Mehta 1252e2cb1decSSalil Mehta static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1253e2cb1decSSalil Mehta { 1254e2cb1decSSalil Mehta struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1255e2cb1decSSalil Mehta 1256e2cb1decSSalil Mehta vector->vector_irq = pci_irq_vector(hdev->pdev, 1257e2cb1decSSalil Mehta HCLGEVF_MISC_VECTOR_NUM); 1258e2cb1decSSalil Mehta vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1259e2cb1decSSalil Mehta /* vector status always valid for Vector 0 */ 1260e2cb1decSSalil Mehta hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1261e2cb1decSSalil Mehta hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1262e2cb1decSSalil Mehta 1263e2cb1decSSalil Mehta hdev->num_msi_left -= 1; 1264e2cb1decSSalil Mehta hdev->num_msi_used += 1; 1265e2cb1decSSalil Mehta } 1266e2cb1decSSalil Mehta 126735a1e503SSalil Mehta void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 126835a1e503SSalil Mehta { 126935a1e503SSalil Mehta if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && 127035a1e503SSalil Mehta !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { 127135a1e503SSalil Mehta set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 127235a1e503SSalil Mehta schedule_work(&hdev->rst_service_task); 127335a1e503SSalil Mehta } 127435a1e503SSalil Mehta } 127535a1e503SSalil Mehta 127607a0556aSSalil Mehta void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1277e2cb1decSSalil Mehta { 127807a0556aSSalil Mehta if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && 127907a0556aSSalil Mehta !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { 128007a0556aSSalil Mehta set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1281e2cb1decSSalil Mehta 
		schedule_work(&hdev->mbx_service_task);
	}
}

/* Schedule the periodic service task unless the device is down or the
 * task is already queued (SERVICE_SCHED bit acts as the "queued" latch).
 */
static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

/* Re-arm any work that was deferred: pending mailbox events and pending
 * reset requests. Called from the periodic service task.
 */
static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

/* Periodic timer callback: re-arms itself every 5 seconds and kicks the
 * service task.
 */
static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

/* Reset work handler: performs a pending PF-initiated reset, or issues a
 * VF reset request to the PF; escalates to a full reset after repeated
 * failed attempts. RST_HANDLING guards against re-entrance.
 */
static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	/* only one reset can be in flight at a time */
	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		ret = hclgevf_reset(hdev);
		if (ret)
			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect above cases than to
		 * react to the response of PF for this reset request. PF will
		 * ack the 1b and 2. cases but we will not get any intimation
		 * about 1a from PF as cmdq would be in unreliable state i.e.
		 * mailbox communication between PF and VF would be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			hdev->reset_level = HNAE3_VF_FULL_RESET;

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			/* request PF for resetting this VF via mailbox */
			ret = hclgevf_do_reset(hdev);
			if (ret)
				dev_warn(&hdev->pdev->dev,
					 "VF rst fail, stack will call\n");
		}
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

/* Mailbox work handler: drains asynchronous mailbox messages from the PF.
 * MBX_HANDLING guards against re-entrance.
 */
static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

/* Periodic service work: polls link status from the PF and re-arms any
 * deferred mailbox/reset work.
 */
static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

/* Acknowledge vector-0 event sources by writing back the source bits. */
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

/* Read the vector-0 event source register and report whether a mailbox
 * (CMDQ RX) event is pending. On success *clearval is set to the value to
 * write back for acknowledgement (with the RX bit cleared).
 */
static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

/* Mask (en == false) or unmask (en == true) the misc interrupt vector. */
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

/* Misc (vector 0) interrupt handler: masks the vector, dispatches mailbox
 * events if any, acks the cause, then unmasks the vector again.
 */
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	hclgevf_mbx_handler(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

/* Fetch the VF's queue and TC configuration from the PF over the mailbox. */
static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

/* Allocate the device-managed hclgevf_dev structure and cross-link it with
 * the ae device (freed automatically with the PCI device).
 */
static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev = ae_dev->priv;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

/* Populate the RoCE handle's vector/netdev/io-base info from the NIC
 * handle. Fails with -EINVAL if not enough MSI vectors remain for RoCE.
 */
static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

/* Program the hardware RSS configuration: hash algorithm/key and tuple
 * sets (revision >= 0x21 only), the indirection table, and the TC mode.
 */
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* older revisions do not support configuring hash key/tuples */
	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
		netdev_rss_key_fill(rss_cfg->rss_hash_key,
				    HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;

	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

/* Initialize the VLAN configuration: currently just disables the VLAN 0
 * filter via the PF.
 */
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

/* ae_algo start op: clears TQP stats, polls link info, marks the device up
 * and arms the service timer.
 */
static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

/* ae_algo stop op: marks the device down, clears TQP stats, stops the
 * service timer/work and reports link down.
 */
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

/* Set up the driver's deferred-work machinery (mailbox, service and reset
 * work items, service timer, mailbox mutex) and start in the DOWN state.
 */
static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

/* Tear down the deferred-work machinery: mark the device down, stop the
 * timer and flush all work items before destroying the mailbox mutex.
 */
static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

/* Allocate MSI/MSI-X vectors (MSI-X only when RoCE is supported, since the
 * RoCE vectors sit at a fixed offset) and the per-vector bookkeeping
 * arrays. Tolerates getting fewer vectors than requested.
 */
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

/* Release the MSI/MSI-X vectors (the devm-allocated arrays are freed with
 * the device).
 */
static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

/* Request the misc (vector 0) IRQ, clear any stale event cause and unmask
 * the vector.
 */
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

/* Mask the misc vector, wait for any in-flight handler to finish, then
 * free the IRQ and the vector slot.
 */
static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

/* Initialize a client (KNIC/UNIC/RoCE) instance on this ae device. The
 * RoCE instance is only initialized once both the NIC and RoCE clients
 * are registered; on failure the client pointers are cleared so a retry
 * starts from a clean state.
 */
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		/* if the RoCE client registered first, bring it up now */
		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		/* defer init until the NIC client is also present */
		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

/* Tear down a client instance: RoCE first (it depends on the NIC), then
 * the NIC/UNIC instance unless this call came from the RoCE client.
 */
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

/* Enable the PCI device, set up 64-bit DMA, claim the regions and map
 * BAR 2 (the configuration register space). Unwinds via gotos on failure.
 */
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

/* Reverse of hclgevf_pci_init: unmap, release regions and disable. */
static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/* Query the VF's interrupt-vector resources from firmware and derive
 * num_msi (including the RoCE vector block at a fixed offset when RoCE
 * is supported).
 */
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and Roce vectors, NIC vectors
		 * are queued before Roce vectors. The offset is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

/* Re-initialize the hardware-facing state after a reset: command queue,
 * RSS and VLAN configuration. Software state (IRQs, workers) is kept.
 */
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

/* Full first-time device bring-up: PCI, command queue, resource query,
 * MSI, state/worker setup, misc IRQ, configuration, TQPs, RSS and VLAN.
 * Unwinds in reverse order through the goto chain on failure.
 */
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

/* Tear down the device in reverse order of hclgevf_init_hdev. */
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}

/* ae_algo init op: allocate the hdev then run full initialization. */
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

/* ae_algo uninit op: tear down the hdev and detach it from the ae device. */
static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

/* Maximum usable channel (queue-pair) count: bounded by both the RSS
 * capacity per TC and the number of TQPs the VF owns.
 */
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

/* Report the allocated TQP count and maximum RSS size for this VF. */
static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

/* Return the cached MAC link state (non-zero when the link is up). */
static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

21244a152de9SFuyun Liang static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 21254a152de9SFuyun Liang u8 *auto_neg, u32 *speed, 21264a152de9SFuyun Liang u8 *duplex) 21274a152de9SFuyun Liang { 21284a152de9SFuyun Liang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 21294a152de9SFuyun Liang 21304a152de9SFuyun Liang if (speed) 21314a152de9SFuyun Liang *speed = hdev->hw.mac.speed; 21324a152de9SFuyun Liang if (duplex) 21334a152de9SFuyun Liang *duplex = hdev->hw.mac.duplex; 21344a152de9SFuyun Liang if (auto_neg) 21354a152de9SFuyun Liang *auto_neg = AUTONEG_DISABLE; 21364a152de9SFuyun Liang } 21374a152de9SFuyun Liang 21384a152de9SFuyun Liang void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 21394a152de9SFuyun Liang u8 duplex) 21404a152de9SFuyun Liang { 21414a152de9SFuyun Liang hdev->hw.mac.speed = speed; 21424a152de9SFuyun Liang hdev->hw.mac.duplex = duplex; 21434a152de9SFuyun Liang } 21444a152de9SFuyun Liang 2145c136b884SPeng Li static void hclgevf_get_media_type(struct hnae3_handle *handle, 2146c136b884SPeng Li u8 *media_type) 2147c136b884SPeng Li { 2148c136b884SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2149c136b884SPeng Li if (media_type) 2150c136b884SPeng Li *media_type = hdev->hw.mac.media_type; 2151c136b884SPeng Li } 2152c136b884SPeng Li 21534d60291bSHuazhong Tan static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 21544d60291bSHuazhong Tan { 21554d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 21564d60291bSHuazhong Tan 21574d60291bSHuazhong Tan return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); 21584d60291bSHuazhong Tan } 21594d60291bSHuazhong Tan 21604d60291bSHuazhong Tan static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 21614d60291bSHuazhong Tan { 21624d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 21634d60291bSHuazhong Tan 21644d60291bSHuazhong Tan return test_bit(HCLGEVF_STATE_RST_HANDLING, 
&hdev->state); 21654d60291bSHuazhong Tan } 21664d60291bSHuazhong Tan 21674d60291bSHuazhong Tan static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 21684d60291bSHuazhong Tan { 21694d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 21704d60291bSHuazhong Tan 21714d60291bSHuazhong Tan return hdev->reset_count; 21724d60291bSHuazhong Tan } 21734d60291bSHuazhong Tan 2174e2cb1decSSalil Mehta static const struct hnae3_ae_ops hclgevf_ops = { 2175e2cb1decSSalil Mehta .init_ae_dev = hclgevf_init_ae_dev, 2176e2cb1decSSalil Mehta .uninit_ae_dev = hclgevf_uninit_ae_dev, 2177e718a93fSPeng Li .init_client_instance = hclgevf_init_client_instance, 2178e718a93fSPeng Li .uninit_client_instance = hclgevf_uninit_client_instance, 2179e2cb1decSSalil Mehta .start = hclgevf_ae_start, 2180e2cb1decSSalil Mehta .stop = hclgevf_ae_stop, 2181e2cb1decSSalil Mehta .map_ring_to_vector = hclgevf_map_ring_to_vector, 2182e2cb1decSSalil Mehta .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 2183e2cb1decSSalil Mehta .get_vector = hclgevf_get_vector, 21840d3e6631SYunsheng Lin .put_vector = hclgevf_put_vector, 2185e2cb1decSSalil Mehta .reset_queue = hclgevf_reset_tqp, 2186e2cb1decSSalil Mehta .set_promisc_mode = hclgevf_set_promisc_mode, 2187e2cb1decSSalil Mehta .get_mac_addr = hclgevf_get_mac_addr, 2188e2cb1decSSalil Mehta .set_mac_addr = hclgevf_set_mac_addr, 2189e2cb1decSSalil Mehta .add_uc_addr = hclgevf_add_uc_addr, 2190e2cb1decSSalil Mehta .rm_uc_addr = hclgevf_rm_uc_addr, 2191e2cb1decSSalil Mehta .add_mc_addr = hclgevf_add_mc_addr, 2192e2cb1decSSalil Mehta .rm_mc_addr = hclgevf_rm_mc_addr, 2193e2cb1decSSalil Mehta .get_stats = hclgevf_get_stats, 2194e2cb1decSSalil Mehta .update_stats = hclgevf_update_stats, 2195e2cb1decSSalil Mehta .get_strings = hclgevf_get_strings, 2196e2cb1decSSalil Mehta .get_sset_count = hclgevf_get_sset_count, 2197e2cb1decSSalil Mehta .get_rss_key_size = hclgevf_get_rss_key_size, 2198e2cb1decSSalil Mehta 
.get_rss_indir_size = hclgevf_get_rss_indir_size, 2199e2cb1decSSalil Mehta .get_rss = hclgevf_get_rss, 2200e2cb1decSSalil Mehta .set_rss = hclgevf_set_rss, 2201d97b3072SJian Shen .get_rss_tuple = hclgevf_get_rss_tuple, 2202d97b3072SJian Shen .set_rss_tuple = hclgevf_set_rss_tuple, 2203e2cb1decSSalil Mehta .get_tc_size = hclgevf_get_tc_size, 2204e2cb1decSSalil Mehta .get_fw_version = hclgevf_get_fw_version, 2205e2cb1decSSalil Mehta .set_vlan_filter = hclgevf_set_vlan_filter, 2206b2641e2aSYunsheng Lin .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 22076d4c3981SSalil Mehta .reset_event = hclgevf_reset_event, 2208720bd583SHuazhong Tan .set_default_reset_request = hclgevf_set_def_reset_request, 2209849e4607SPeng Li .get_channels = hclgevf_get_channels, 2210cc719218SPeng Li .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 2211175ec96bSFuyun Liang .get_status = hclgevf_get_status, 22124a152de9SFuyun Liang .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 2213c136b884SPeng Li .get_media_type = hclgevf_get_media_type, 22144d60291bSHuazhong Tan .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 22154d60291bSHuazhong Tan .ae_dev_resetting = hclgevf_ae_dev_resetting, 22164d60291bSHuazhong Tan .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 2217e2cb1decSSalil Mehta }; 2218e2cb1decSSalil Mehta 2219e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf = { 2220e2cb1decSSalil Mehta .ops = &hclgevf_ops, 2221e2cb1decSSalil Mehta .pdev_id_table = ae_algovf_pci_tbl, 2222e2cb1decSSalil Mehta }; 2223e2cb1decSSalil Mehta 2224e2cb1decSSalil Mehta static int hclgevf_init(void) 2225e2cb1decSSalil Mehta { 2226e2cb1decSSalil Mehta pr_info("%s is initializing\n", HCLGEVF_NAME); 2227e2cb1decSSalil Mehta 2228854cf33aSFuyun Liang hnae3_register_ae_algo(&ae_algovf); 2229854cf33aSFuyun Liang 2230854cf33aSFuyun Liang return 0; 2231e2cb1decSSalil Mehta } 2232e2cb1decSSalil Mehta 2233e2cb1decSSalil Mehta static void hclgevf_exit(void) 2234e2cb1decSSalil Mehta { 
2235e2cb1decSSalil Mehta hnae3_unregister_ae_algo(&ae_algovf); 2236e2cb1decSSalil Mehta } 2237e2cb1decSSalil Mehta module_init(hclgevf_init); 2238e2cb1decSSalil Mehta module_exit(hclgevf_exit); 2239e2cb1decSSalil Mehta 2240e2cb1decSSalil Mehta MODULE_LICENSE("GPL"); 2241e2cb1decSSalil Mehta MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 2242e2cb1decSSalil Mehta MODULE_DESCRIPTION("HCLGEVF Driver"); 2243e2cb1decSSalil Mehta MODULE_VERSION(HCLGEVF_MOD_VERSION); 2244