// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

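/* Note: the string order above must match the counter order emitted by
 * hclgevf_tqps_get_stats() -- all TX packet counters first, then all RX
 * packet counters -- otherwise ethtool would mislabel the per-queue stats.
 */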
static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	/* the 8-byte response carries four consecutive u16 fields */
	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	/* if this is an ongoing reset then we need to re-allocate the TQPs
	 * since we cannot assume we would get the same number of TQPs back
	 * from the PF
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, hdev->htqp);

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

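	/* rss_size per TC is the smaller of the hardware limit and an even
	 * split of this VF's TQPs across the enabled TCs; only
	 * rss_size * num_tc queues are then exposed to the stack.
	 */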
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	/* if this is an ongoing reset then we need to re-allocate the hnae
	 * queues as well since the number of TQPs from the PF might have
	 * changed.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, kinfo->tqp);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

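/* The shadow indirection table is pushed to hardware in chunks: each
 * command descriptor carries HCLGEVF_RSS_CFG_TBL_SIZE entries, so the
 * full table takes HCLGEVF_RSS_CFG_TBL_NUM descriptors.
 */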
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			     (tc_valid[i] & 0x1));
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
			       HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
			       HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
				  u8 *key)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int lkup_times;
	int key_offset;
	int key_size;
	int status;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	/* read three descriptors when the key is wanted, one when only the
	 * hash algorithm is wanted, none otherwise
	 */
	lkup_times = key ? 3 : (hash ? 1 : 0);

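	/* each lookup reads one descriptor: the first two carry
	 * HCLGEVF_RSS_HASH_KEY_NUM key bytes each, the last carries the
	 * remainder of the HCLGEVF_RSS_KEY_SIZE-byte key
	 */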
	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     true);
		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"failed to get hardware RSS cfg, status = %d\n",
				status);
			return status;
		}

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		if (key)
			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
			       req->hash_key,
			       key_size);
	}

	if (hash) {
		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
			*hash = ETH_RSS_HASH_TOP;
		else
			*hash = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0, vector_id;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

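	/* Request layout: msg[0] = map/unmap opcode, msg[1] = vector id,
	 * msg[2] = number of rings carried, then one (ring type, tqp index,
	 * GL index) triplet per ring starting at
	 * msg[HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]. When a descriptor fills up
	 * it is sent and a fresh one is started for the remaining rings.
	 */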
	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
							  HNAE3_RING_GL_IDX_M,
							  HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		    HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		    HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_free_vector(hdev, vector);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				     bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg[2] = {0};

	msg[0] = en;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
				    msg, 1, false, NULL, 0);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

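/* A MAC address change sends the new address followed by the currently
 * programmed one, so the PF can locate and replace the existing entry;
 * on the very first configuration there is nothing to replace, hence
 * the ADD subcode instead of MODIFY.
 */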
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* msg_data[0] = add/kill flag, [1..2] = vlan id, [3..4] = protocol */
	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return;

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
			     2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	return client->ops->reset_notify(handle, type);
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS	500
#define HCLGEVF_RESET_WAIT_CNT	20
	u32 val, cnt = 0;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
	while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
		msleep(HCLGEVF_RESET_WAIT_MS);
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
		cnt++;
	}

	/* hardware completion status should be available by this time */
	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "couldn't get reset done status from h/w, timeout!\n");
		return -EBUSY;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by the PF. Yes,
	 * this also means we might end up waiting a bit more even for a VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

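/* VF reset proceeds in stages: bring the nic client down, poll until the
 * hardware function-reset bit clears (hclgevf_reset_wait), then tear down
 * and re-initialize the hclgevf device and nic client before bringing the
 * interface back up.
 */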
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

	/* re-initialize the hclge device */
	ret = hclgevf_init_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

	return 0;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	int ret;

	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);

		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

		rtnl_unlock();
		return ret;
	}

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

	/* bring up the nic to enable TX/RX again */
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

	rtnl_unlock();

	return ret;
}

static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
	int status;
	u8 respmsg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
				      0, false, &respmsg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF reset request to PF failed(=%d)\n", status);

	return status;
}

static void hclgevf_reset_event(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	handle->reset_level = HNAE3_VF_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	handle->last_reset_time = jiffies;
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

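/* Each service task uses a pair of state bits: *_SERVICE_SCHED marks work
 * queued but not yet run, *_HANDLING marks work currently executing.
 * Checking both before scheduling avoids queuing the same task twice or
 * re-queuing it while its handler is still running.
 */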
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		ret = hclgevf_reset(hdev);
		if (ret)
			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect above cases than to
		 * react to the response of PF for this reset request. PF will
		 * ack the 1b and 2. cases but we will not get any intimation
		 * about 1a from PF as cmdq would be in unreliable state i.e.
		 * mailbox communication between PF and VF would be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			/* request PF for resetting this VF via mailbox */
			ret = hclgevf_do_reset(hdev);
			if (ret)
				dev_warn(&hdev->pdev->dev,
					 "VF rst fail, stack will call\n");
		}
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

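/* Vector 0 stays masked while its events are handled; the interrupt source
 * register is cleared only for recognized (mailbox) events before the
 * vector is unmasked again.
 */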
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	hclgevf_mbx_handler(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector =
		hdev->vector_status[hdev->num_msi_used];

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

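/* ae_stop mirrors ae_start: rings are disabled, the periodic service timer
 * and task are stopped, and link-down is reported to the client.
 */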
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < hdev->num_tqps; i++) {
		/* Ring disable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return;

	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

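/* Vector 0 (HCLGEVF_MISC_VECTOR_NUM) is reserved for mailbox and other
 * misc. events; hclgevf_get_vector() hands out ring vectors starting at
 * vector 1.
 */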
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_instance(struct hclgevf_dev *hdev,
				 struct hnae3_client *client)
{
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
				    struct hnae3_client *client)
{
	/* un-init roce, if it exists */
	if (hdev->roce_client)
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);

	/* un-init nic/unic, if this was not called by roce client */
	if ((client->ops->uninit_instance) &&
	    (client->type != HNAE3_CLIENT_ROCE))
		client->ops->uninit_instance(&hdev->nic, 0);
}

static int hclgevf_register_client(struct hnae3_client *client,
				   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	return hclgevf_init_instance(hdev, client);
}

static void hclgevf_unregister_client(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_instance(hdev, client);
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	/* check if we need to skip initialization of pci. This will happen if
	 * device is undergoing VF reset. Otherwise, we would need to
	 * re-initialize pci interface again i.e. when device is not going
	 * through *any* reset or actually undergoing full reset.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	/* check if device is undergoing full reset(i.e. pcie as well) */
	if (hclgevf_dev_ongoing_full_reset(hdev)) {
		dev_warn(&pdev->dev, "device is undergoing full reset\n");
		hclgevf_uninit_hdev(hdev);
	}

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_irq_init;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize VF's MTA */
	hdev->accept_mta_mc = true;
	ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to set mta filter mode\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_irq_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *free_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*free_tqps = 0;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_register_client,
	.uninit_client_instance = hclgevf_unregister_client,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.name = HCLGEVF_NAME,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);