// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
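		/* issue the per-queue RX counter query over the command queue */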
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}
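/* query the PF for this VF's current port-based VLAN state */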
static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	u8 resp_msg;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				   HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
				   NULL, 0, true, &resp_msg, sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0,
				   true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2], resp_data[2];
	u16 qid_in_pf = 0;
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
				   2, true, resp_data, 2);
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
				   true, &resp_msg, sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg;

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
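	/* copy queue/descriptor limits from the PF and count the TCs
	 * enabled in the hardware TC map
	 */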
	kinfo->num_tc = 0;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1
	u8 send_msg;
	u8 resp_msg;

	send_msg = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
			     sizeof(u8), false, &resp_msg, sizeof(u8));
	send_msg = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
			     sizeof(u8), false, &resp_msg, sizeof(u8));
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}
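/* hand out up to vector_num of the still-unused MSI-X vectors; returns the
 * number actually allocated
 */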
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);
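	/* fill per-TC validity, size (log2 of the queue count) and offset */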
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8

	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0,
					   &index, sizeof(index),
					   true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}
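/* program the RSS key/algorithm (rev 0x21 and later), then refresh the
 * shadow indirection table and write it to hardware
 */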
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with the user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}
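	/* the command was accepted; mirror the new tuple settings in the
	 * shadow configuration
	 */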
	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
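	/* walk the ring chain, packing as many ring-to-vector mappings as
	 * fit into one mailbox message before sending it to the PF
	 */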
	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector = %d, ret = %d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_bc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
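	/* msg[1] carries the broadcast-promiscuous enable flag */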
	req->msg[1] = en_bc_pmc ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
{
	return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
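	/* on the first configuration add the address; later calls modify
	 * the existing entry
	 */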
	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}
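/* ask the PF to enable/disable hardware RX VLAN-tag stripping for this VF */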
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev,
			"flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* Wait a bit more for the stack's reset to complete. This can happen
	 * when the reset was asserted by the PF; it also means we may end up
	 * waiting a bit longer even for a VF reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		hdev->rst_stats.vf_func_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		hdev->rst_stats.flr_rst_cnt++;
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->rst_stats.rst_cnt++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	hdev->rst_stats.hw_rst_done_cnt++;

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	hdev->rst_stats.rst_done_cnt++;

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	/* When the VF reset fails, only a higher-level reset asserted by the
	 * PF can recover it, so re-initialize the command queue to be able
	 * to receive that higher-level reset event.
	 */
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");
	if (hclgevf_is_reset_pending(hdev))
		hclgevf_reset_task_schedule(hdev);

	return ret;
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}
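/* vector 0 is reserved for misc interrupts (mailbox and reset events) */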
static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has indicated that it is about to reset the hardware.
		 * We now have to poll & check if the hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       a VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. a problem in TX due to some other lower layer (for
		 *       example, the link layer not functioning properly).
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clearer way to detect the above cases than
		 * to react to the response of the PF for this reset request.
		 * The PF will ack cases 1b and 2, but we will not get any
		 * intimation about 1a from the PF as the cmdq would be in an
		 * unreliable state, i.e. mailbox communication between PF and
		 * VF would be broken.
		 */
		/* if we are never getting into the pending state it means
		 * either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much about case 2, but as a first check we can
		 * try resetting our PCIe + stack and see if it alleviates the
		 * problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(u8));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);
	/* Request the link status from the PF. The PF should be able to push
	 * such updates to the VF in the future, so this polling might be
	 * removed later.
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		hdev->rst_stats.vf_rst_cnt++;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}
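/* enable (1) or disable (0) the misc vector by writing its control register */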
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	ret = hclgevf_get_pf_media_type(hdev);
	if (ret)
		return ret;

	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
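	/* gro_en is a 16-bit little-endian on/off flag in the descriptor */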
	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config (e.g. VLAN TX/RX offload) will also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclgevf_reset_tqp(handle, i);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}
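/* report this VF's alive state to the PF over the mailbox */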

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, true);
	if (ret)
		return ret;

	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);

	return 0;
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);

	del_timer_sync(&hdev->keep_alive_timer);
	cancel_work_sync(&hdev->keep_alive_task);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->keep_alive_timer.function)
		del_timer_sync(&hdev->keep_alive_timer);
	if (hdev->keep_alive_task.func)
		cancel_work_sync(&hdev->keep_alive_task);
	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
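
/* Editor's note (not in the original): pci_alloc_irq_vectors(pdev, min,
 * max, flags) allocates between min and max vectors and returns the
 * count actually granted, or a negative errno. With RoCE supported the
 * minimum is roce_base_msix_offset + 1, presumably so that the full NIC
 * vector range below the RoCE offset is always available; the warning
 * above fires whenever the platform grants fewer than hdev->num_msi.
 */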

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);	/* device registers live in BAR 2 */
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* The VF has both NIC and RoCE vectors; NIC vectors are
		 * queued ahead of the RoCE vectors, and the RoCE offset is
		 * fixed at 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}
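
/* Editor's worked example (numbers are hypothetical): if the firmware
 * reports num_roce_msix = 8 with the fixed roce_base_msix_offset of 64,
 * then num_msi = 64 + 8 = 72 -- NIC vectors occupy slots 0..63 and the
 * RoCE vectors slots 64..71, which matches the minimum of
 * roce_base_msix_offset + 1 requested in hclgevf_init_msi() above.
 */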

static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd init failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* The VF is not allowed to enable unicast/multicast promisc mode.
	 * On revision 0x20, broadcast promisc mode defaults to disabled and
	 * the firmware ensures broadcast packets are still accepted.
	 * On revision 0x21, broadcast promisc mode defaults to enabled.
	 */
	ret = hclgevf_set_promisc_mode(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}
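
/* Editor's note (not in the original): hclgevf_init_hdev() uses the
 * kernel's usual goto-unwind idiom -- each err_* label tears down only
 * what was set up before the failing step, in reverse order, so a
 * failure at any point leaves no half-initialized state behind.
 */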

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	hdev = ae_dev->priv;
	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 */
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}
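
/* Editor's note (not in the original): these fields back "ethtool -l":
 * max_combined is the largest value a user may pass to
 * "ethtool -L <dev> combined N", and combined_count reports the number
 * of queue pairs currently in use for RSS.
 */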

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}
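
/* Editor's worked example (hypothetical list size): with
 * REG_NUM_PER_LINE = 4 and REG_LEN_PER_LINE = 16, a list of 14 u32
 * registers occupies 56 bytes, so 56 / 16 + 1 = 4 lines (64 bytes) are
 * reserved -- three full lines of register values plus headroom for the
 * SEPARATOR_VALUE padding that hclgevf_get_regs() below appends after
 * each register group (here 14 % 4 = 2, so two separators).
 */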

static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_um, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetching per-VF register values from VF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();

	/* send msg to PF and wait for the port based VLAN info update */
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
			     HCLGE_MBX_PORT_BASE_VLAN_CFG,
			     port_base_vlan_info, data_size,
			     false, NULL, 0);

	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
};
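
/* Editor's note (not in the original): in
 * hclgevf_update_port_base_vlan_info() the client is taken down before
 * the mailbox message and brought back up afterwards, quiescing traffic
 * while the PF rewrites the port based VLAN configuration; the
 * notifications are wrapped in rtnl_lock()/rtnl_unlock(), matching the
 * locking the netdev client callbacks expect.
 */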

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);