1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <net/rtnetlink.h> 6 #include "hclgevf_cmd.h" 7 #include "hclgevf_main.h" 8 #include "hclge_mbx.h" 9 #include "hnae3.h" 10 11 #define HCLGEVF_NAME "hclgevf" 12 13 static int hclgevf_init_hdev(struct hclgevf_dev *hdev); 14 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev); 15 static struct hnae3_ae_algo ae_algovf; 16 17 static const struct pci_device_id ae_algovf_pci_tbl[] = { 18 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 19 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 20 /* required last entry */ 21 {0, } 22 }; 23 24 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 25 26 static inline struct hclgevf_dev *hclgevf_ae_get_hdev( 27 struct hnae3_handle *handle) 28 { 29 return container_of(handle, struct hclgevf_dev, nic); 30 } 31 32 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 33 { 34 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 35 struct hnae3_queue *queue; 36 struct hclgevf_desc desc; 37 struct hclgevf_tqp *tqp; 38 int status; 39 int i; 40 41 for (i = 0; i < hdev->num_tqps; i++) { 42 queue = handle->kinfo.tqp[i]; 43 tqp = container_of(queue, struct hclgevf_tqp, q); 44 hclgevf_cmd_setup_basic_desc(&desc, 45 HCLGEVF_OPC_QUERY_RX_STATUS, 46 true); 47 48 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 49 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 50 if (status) { 51 dev_err(&hdev->pdev->dev, 52 "Query tqp stat fail, status = %d,queue = %d\n", 53 status, i); 54 return status; 55 } 56 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 57 le32_to_cpu(desc.data[1]); 58 59 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 60 true); 61 62 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 63 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 64 if (status) { 65 dev_err(&hdev->pdev->dev, 66 "Query tqp stat fail, status = %d,queue = %d\n", 67 status, i); 68 return status; 69 } 70 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 71 le32_to_cpu(desc.data[1]); 72 } 73 74 return 0; 75 } 76 77 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 78 { 79 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 80 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 81 struct hclgevf_tqp *tqp; 82 u64 *buff = data; 83 int i; 84 85 for (i = 0; i < hdev->num_tqps; i++) { 86 tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); 87 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 88 } 89 for (i = 0; i < kinfo->num_tqps; i++) { 90 tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); 91 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 92 } 93 94 return buff; 95 } 96 97 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 98 { 99 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 100 101 return hdev->num_tqps * 2; 102 } 103 104 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 105 { 106 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 107 u8 *buff = data; 108 int i = 0; 109 110 for (i = 0; i < hdev->num_tqps; i++) { 111 struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], 112 struct hclgevf_tqp, q); 113 snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", 114 tqp->index); 115 buff += ETH_GSTRING_LEN; 116 } 117 118 for (i = 0; i < hdev->num_tqps; i++) { 119 struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], 120 struct hclgevf_tqp, q); 121 snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", 122 tqp->index); 123 buff += ETH_GSTRING_LEN; 124 } 125 126 return buff; 127 } 128 129 static void hclgevf_update_stats(struct hnae3_handle *handle, 130 struct net_device_stats *net_stats) 131 { 132 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 133 int status; 134 135 status = hclgevf_tqps_update_stats(handle); 136 if (status) 137 dev_err(&hdev->pdev->dev, 138 "VF update of TQPS stats fail, status = %d.\n", 139 status); 140 } 141 142 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 143 { 144 if (strset == ETH_SS_TEST) 145 return -EOPNOTSUPP; 146 else if (strset == ETH_SS_STATS) 147 return hclgevf_tqps_get_sset_count(handle, strset); 148 149 return 0; 150 } 151 152 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 153 u8 *data) 154 { 155 u8 *p = (char *)data; 156 157 if (strset == ETH_SS_STATS) 158 p = hclgevf_tqps_get_strings(handle, p); 159 } 160 161 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 162 { 163 hclgevf_tqps_get_stats(handle, data); 164 } 165 166 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 167 { 168 u8 resp_msg; 169 int status; 170 171 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 172 true, &resp_msg, sizeof(u8)); 173 if (status) { 174 dev_err(&hdev->pdev->dev, 175 "VF request to get TC info from PF failed %d", 176 status); 177 return status; 178 } 179 180 hdev->hw_tc_map = resp_msg; 181 182 return 0; 183 } 184 185 static int hclge_get_queue_info(struct hclgevf_dev *hdev) 186 { 187 #define HCLGEVF_TQPS_RSS_INFO_LEN 8 188 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 189 int status; 190 191 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 192 true, resp_msg, 193 HCLGEVF_TQPS_RSS_INFO_LEN); 194 if (status) { 195 dev_err(&hdev->pdev->dev, 196 "VF request to get tqp info from PF failed %d", 197 status); 198 return status; 199 } 200 201 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 202 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 203 memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); 204 memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); 205 206 return 0; 207 } 208 209 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 210 { 211 struct hclgevf_tqp *tqp; 212 int i; 213 214 /* if this is on going reset then we need to re-allocate the TPQs 215 * since we cannot assume we would get same number of TPQs back from PF 216 */ 217 if (hclgevf_dev_ongoing_reset(hdev)) 218 devm_kfree(&hdev->pdev->dev, hdev->htqp); 219 220 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 221 sizeof(struct hclgevf_tqp), GFP_KERNEL); 222 if (!hdev->htqp) 223 return -ENOMEM; 224 225 tqp = hdev->htqp; 226 227 for (i = 0; i < hdev->num_tqps; i++) { 228 tqp->dev = &hdev->pdev->dev; 229 tqp->index = i; 230 231 tqp->q.ae_algo = &ae_algovf; 232 tqp->q.buf_size = hdev->rx_buf_len; 233 tqp->q.desc_num = hdev->num_desc; 234 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 235 i * HCLGEVF_TQP_REG_SIZE; 236 237 tqp++; 238 } 239 240 return 0; 241 } 242 243 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 244 { 245 struct hnae3_handle *nic = &hdev->nic; 246 struct hnae3_knic_private_info *kinfo; 247 u16 new_tqps = hdev->num_tqps; 248 int i; 249 250 kinfo = &nic->kinfo; 251 kinfo->num_tc = 0; 252 kinfo->num_desc = hdev->num_desc; 253 kinfo->rx_buf_len = hdev->rx_buf_len; 254 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 255 if (hdev->hw_tc_map & BIT(i)) 256 kinfo->num_tc++; 257 258 kinfo->rss_size 259 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 260 new_tqps = kinfo->rss_size * kinfo->num_tc; 261 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 262 263 /* if this is on going reset then we need to re-allocate the hnae queues 264 * as well since number of TPQs from PF might have changed. 265 */ 266 if (hclgevf_dev_ongoing_reset(hdev)) 267 devm_kfree(&hdev->pdev->dev, kinfo->tqp); 268 269 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 270 sizeof(struct hnae3_queue *), GFP_KERNEL); 271 if (!kinfo->tqp) 272 return -ENOMEM; 273 274 for (i = 0; i < kinfo->num_tqps; i++) { 275 hdev->htqp[i].q.handle = &hdev->nic; 276 hdev->htqp[i].q.tqp_index = i; 277 kinfo->tqp[i] = &hdev->htqp[i].q; 278 } 279 280 return 0; 281 } 282 283 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 284 { 285 int status; 286 u8 resp_msg; 287 288 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 289 0, false, &resp_msg, sizeof(u8)); 290 if (status) 291 dev_err(&hdev->pdev->dev, 292 "VF failed to fetch link status(%d) from PF", status); 293 } 294 295 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 296 { 297 struct hnae3_handle *handle = &hdev->nic; 298 struct hnae3_client *client; 299 300 client = handle->client; 301 302 if (link_state != hdev->hw.mac.link) { 303 client->ops->link_status_change(handle, !!link_state); 304 hdev->hw.mac.link = link_state; 305 } 306 } 307 308 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 309 { 310 struct hnae3_handle *nic = &hdev->nic; 311 int ret; 312 313 nic->ae_algo = &ae_algovf; 314 nic->pdev = hdev->pdev; 315 nic->numa_node_mask = hdev->numa_node_mask; 316 nic->flags |= HNAE3_SUPPORT_VF; 317 318 if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { 319 dev_err(&hdev->pdev->dev, "unsupported device type %d\n", 320 hdev->ae_dev->dev_type); 321 return -EINVAL; 322 } 323 324 ret = hclgevf_knic_setup(hdev); 325 if (ret) 326 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 327 ret); 328 return ret; 329 } 330 331 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 332 { 333 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 334 hdev->num_msi_left += 1; 335 hdev->num_msi_used -= 1; 336 } 337 338 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 339 struct hnae3_vector_info *vector_info) 340 { 341 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 342 struct hnae3_vector_info *vector = vector_info; 343 int alloc = 0; 344 int i, j; 345 346 vector_num = min(hdev->num_msi_left, vector_num); 347 348 for (j = 0; j < vector_num; j++) { 349 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 350 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 351 vector->vector = pci_irq_vector(hdev->pdev, i); 352 vector->io_addr = hdev->hw.io_base + 353 HCLGEVF_VECTOR_REG_BASE + 354 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 355 hdev->vector_status[i] = 0; 356 hdev->vector_irq[i] = vector->vector; 357 358 vector++; 359 alloc++; 360 361 break; 362 } 363 } 364 } 365 hdev->num_msi_left -= alloc; 366 hdev->num_msi_used += alloc; 367 368 return alloc; 369 } 370 371 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 372 { 373 int i; 374 375 for (i = 0; i < hdev->num_msi; i++) 376 if (vector == hdev->vector_irq[i]) 377 return i; 378 379 return -EINVAL; 380 } 381 382 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 383 { 384 return HCLGEVF_RSS_KEY_SIZE; 385 } 386 387 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 388 { 389 return HCLGEVF_RSS_IND_TBL_SIZE; 390 } 391 392 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 393 { 394 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 395 struct hclgevf_rss_indirection_table_cmd *req; 396 struct hclgevf_desc desc; 397 int status; 398 int i, j; 399 400 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 401 402 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 403 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 404 false); 405 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 406 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 407 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 408 req->rss_result[j] = 409 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 410 411 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 412 if (status) { 413 dev_err(&hdev->pdev->dev, 414 "VF failed(=%d) to set RSS indirection table\n", 415 status); 416 return status; 417 } 418 } 419 420 return 0; 421 } 422 423 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 424 { 425 struct hclgevf_rss_tc_mode_cmd *req; 426 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 427 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 428 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 429 struct hclgevf_desc desc; 430 u16 roundup_size; 431 int status; 432 int i; 433 434 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 435 436 roundup_size = roundup_pow_of_two(rss_size); 437 roundup_size = ilog2(roundup_size); 438 439 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 440 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 441 tc_size[i] = roundup_size; 442 tc_offset[i] = rss_size * i; 443 } 444 445 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 446 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 447 hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 448 (tc_valid[i] & 0x1)); 449 hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 450 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 451 hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 452 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 453 } 454 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 455 if (status) 456 dev_err(&hdev->pdev->dev, 457 "VF failed(=%d) to set rss tc mode\n", status); 458 459 return status; 460 } 461 462 static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash, 463 u8 *key) 464 { 465 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 466 struct hclgevf_rss_config_cmd *req; 467 int lkup_times = key ? 3 : 1; 468 struct hclgevf_desc desc; 469 int key_offset; 470 int key_size; 471 int status; 472 473 req = (struct hclgevf_rss_config_cmd *)desc.data; 474 lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0); 475 476 for (key_offset = 0; key_offset < lkup_times; key_offset++) { 477 hclgevf_cmd_setup_basic_desc(&desc, 478 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 479 true); 480 req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET); 481 482 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 483 if (status) { 484 dev_err(&hdev->pdev->dev, 485 "failed to get hardware RSS cfg, status = %d\n", 486 status); 487 return status; 488 } 489 490 if (key_offset == 2) 491 key_size = 492 HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; 493 else 494 key_size = HCLGEVF_RSS_HASH_KEY_NUM; 495 496 if (key) 497 memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, 498 req->hash_key, 499 key_size); 500 } 501 502 if (hash) { 503 if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ) 504 *hash = ETH_RSS_HASH_TOP; 505 else 506 *hash = ETH_RSS_HASH_UNKNOWN; 507 } 508 509 return 0; 510 } 511 512 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 513 u8 *hfunc) 514 { 515 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 516 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 517 int i; 518 519 if (indir) 520 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 521 indir[i] = rss_cfg->rss_indirection_tbl[i]; 522 523 return hclgevf_get_rss_hw_cfg(handle, hfunc, key); 524 } 525 526 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 527 const u8 *key, const u8 hfunc) 528 { 529 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 530 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 531 int i; 532 533 /* update the shadow RSS table with user specified qids */ 534 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 535 rss_cfg->rss_indirection_tbl[i] = indir[i]; 536 537 /* update the hardware */ 538 return hclgevf_set_rss_indir_table(hdev); 539 } 540 541 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 542 { 543 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 544 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 545 546 return rss_cfg->rss_size; 547 } 548 549 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 550 int vector, 551 struct hnae3_ring_chain_node *ring_chain) 552 { 553 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 554 struct hnae3_ring_chain_node *node; 555 struct hclge_mbx_vf_to_pf_cmd *req; 556 struct hclgevf_desc desc; 557 int i = 0, vector_id; 558 int status; 559 u8 type; 560 561 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 562 vector_id = hclgevf_get_vector_index(hdev, vector); 563 if (vector_id < 0) { 564 dev_err(&handle->pdev->dev, 565 "Get vector index fail. ret =%d\n", vector_id); 566 return vector_id; 567 } 568 569 for (node = ring_chain; node; node = node->next) { 570 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 571 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 572 573 if (i == 0) { 574 hclgevf_cmd_setup_basic_desc(&desc, 575 HCLGEVF_OPC_MBX_VF_TO_PF, 576 false); 577 type = en ? 578 HCLGE_MBX_MAP_RING_TO_VECTOR : 579 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 580 req->msg[0] = type; 581 req->msg[1] = vector_id; 582 } 583 584 req->msg[idx_offset] = 585 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B); 586 req->msg[idx_offset + 1] = node->tqp_index; 587 req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx, 588 HNAE3_RING_GL_IDX_M, 589 HNAE3_RING_GL_IDX_S); 590 591 i++; 592 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 593 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 594 HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 595 !node->next) { 596 req->msg[2] = i; 597 598 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 599 if (status) { 600 dev_err(&hdev->pdev->dev, 601 "Map TQP fail, status is %d.\n", 602 status); 603 return status; 604 } 605 i = 0; 606 hclgevf_cmd_setup_basic_desc(&desc, 607 HCLGEVF_OPC_MBX_VF_TO_PF, 608 false); 609 req->msg[0] = type; 610 req->msg[1] = vector_id; 611 } 612 } 613 614 return 0; 615 } 616 617 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 618 struct hnae3_ring_chain_node *ring_chain) 619 { 620 return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain); 621 } 622 623 static int hclgevf_unmap_ring_from_vector( 624 struct hnae3_handle *handle, 625 int vector, 626 struct hnae3_ring_chain_node *ring_chain) 627 { 628 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 629 int ret, vector_id; 630 631 vector_id = hclgevf_get_vector_index(hdev, vector); 632 if (vector_id < 0) { 633 dev_err(&handle->pdev->dev, 634 "Get vector index fail. ret =%d\n", vector_id); 635 return vector_id; 636 } 637 638 ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain); 639 if (ret) 640 dev_err(&handle->pdev->dev, 641 "Unmap ring from vector fail. vector=%d, ret =%d\n", 642 vector_id, 643 ret); 644 645 return ret; 646 } 647 648 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 649 { 650 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 651 652 hclgevf_free_vector(hdev, vector); 653 654 return 0; 655 } 656 657 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) 658 { 659 struct hclge_mbx_vf_to_pf_cmd *req; 660 struct hclgevf_desc desc; 661 int status; 662 663 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 664 665 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); 666 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; 667 req->msg[1] = en; 668 669 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 670 if (status) 671 dev_err(&hdev->pdev->dev, 672 "Set promisc mode fail, status is %d.\n", status); 673 674 return status; 675 } 676 677 static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en) 678 { 679 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 680 681 hclgevf_cmd_set_promisc_mode(hdev, en); 682 } 683 684 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, 685 int stream_id, bool enable) 686 { 687 struct hclgevf_cfg_com_tqp_queue_cmd *req; 688 struct hclgevf_desc desc; 689 int status; 690 691 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 692 693 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 694 false); 695 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 696 req->stream_id = cpu_to_le16(stream_id); 697 req->enable |= enable << HCLGEVF_TQP_ENABLE_B; 698 699 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 700 if (status) 701 dev_err(&hdev->pdev->dev, 702 "TQP enable fail, status =%d.\n", status); 703 704 return status; 705 } 706 707 static int hclgevf_get_queue_id(struct hnae3_queue *queue) 708 { 709 struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q); 710 711 return tqp->index; 712 } 713 714 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 715 { 716 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 717 struct hnae3_queue *queue; 718 struct hclgevf_tqp *tqp; 719 int i; 720 721 for (i = 0; i < hdev->num_tqps; i++) { 722 queue = handle->kinfo.tqp[i]; 723 tqp = container_of(queue, struct hclgevf_tqp, q); 724 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 725 } 726 } 727 728 static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en) 729 { 730 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 731 u8 msg[2] = {0}; 732 733 msg[0] = en; 734 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 735 HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, 736 msg, 1, false, NULL, 0); 737 } 738 739 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 740 { 741 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 742 743 ether_addr_copy(p, hdev->hw.mac.mac_addr); 744 } 745 746 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 747 bool is_first) 748 { 749 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 750 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 751 u8 *new_mac_addr = (u8 *)p; 752 u8 msg_data[ETH_ALEN * 2]; 753 u16 subcode; 754 int status; 755 756 ether_addr_copy(msg_data, new_mac_addr); 757 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); 758 759 subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD : 760 HCLGE_MBX_MAC_VLAN_UC_MODIFY; 761 762 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 763 subcode, msg_data, ETH_ALEN * 2, 764 true, NULL, 0); 765 if (!status) 766 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 767 768 return status; 769 } 770 771 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 772 const unsigned char *addr) 773 { 774 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 775 776 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 777 HCLGE_MBX_MAC_VLAN_UC_ADD, 778 addr, ETH_ALEN, false, NULL, 0); 779 } 780 781 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 782 const unsigned char *addr) 783 { 784 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 785 786 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 787 HCLGE_MBX_MAC_VLAN_UC_REMOVE, 788 addr, ETH_ALEN, false, NULL, 0); 789 } 790 791 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 792 const unsigned char *addr) 793 { 794 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 795 796 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 797 HCLGE_MBX_MAC_VLAN_MC_ADD, 798 addr, ETH_ALEN, false, NULL, 0); 799 } 800 801 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 802 const unsigned char *addr) 803 { 804 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 805 806 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 807 HCLGE_MBX_MAC_VLAN_MC_REMOVE, 808 addr, ETH_ALEN, false, NULL, 0); 809 } 810 811 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 812 __be16 proto, u16 vlan_id, 813 bool is_kill) 814 { 815 #define HCLGEVF_VLAN_MBX_MSG_LEN 5 816 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 817 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; 818 819 if (vlan_id > 4095) 820 return -EINVAL; 821 822 if (proto != htons(ETH_P_8021Q)) 823 return -EPROTONOSUPPORT; 824 825 msg_data[0] = is_kill; 826 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 827 memcpy(&msg_data[3], &proto, sizeof(proto)); 828 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 829 HCLGE_MBX_VLAN_FILTER, msg_data, 830 HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); 831 } 832 833 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 834 { 835 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 836 u8 msg_data; 837 838 msg_data = enable ? 1 : 0; 839 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 840 HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, 841 1, false, NULL, 0); 842 } 843 844 static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 845 { 846 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 847 u8 msg_data[2]; 848 int ret; 849 850 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 851 852 /* disable vf queue before send queue reset msg to PF */ 853 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 854 if (ret) 855 return; 856 857 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 858 2, true, NULL, 0); 859 } 860 861 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 862 enum hnae3_reset_notify_type type) 863 { 864 struct hnae3_client *client = hdev->nic_client; 865 struct hnae3_handle *handle = &hdev->nic; 866 867 if (!client->ops->reset_notify) 868 return -EOPNOTSUPP; 869 870 return client->ops->reset_notify(handle, type); 871 } 872 873 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 874 { 875 #define HCLGEVF_RESET_WAIT_MS 500 876 #define HCLGEVF_RESET_WAIT_CNT 20 877 u32 val, cnt = 0; 878 879 /* wait to check the hardware reset completion status */ 880 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); 881 while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) && 882 (cnt < HCLGEVF_RESET_WAIT_CNT)) { 883 msleep(HCLGEVF_RESET_WAIT_MS); 884 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); 885 cnt++; 886 } 887 888 /* hardware completion status should be available by this time */ 889 if (cnt >= HCLGEVF_RESET_WAIT_CNT) { 890 dev_warn(&hdev->pdev->dev, 891 "could'nt get reset done status from h/w, timeout!\n"); 892 return -EBUSY; 893 } 894 895 /* we will wait a bit more to let reset of the stack to complete. This 896 * might happen in case reset assertion was made by PF. Yes, this also 897 * means we might end up waiting bit more even for VF reset. 898 */ 899 msleep(5000); 900 901 return 0; 902 } 903 904 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 905 { 906 int ret; 907 908 /* uninitialize the nic client */ 909 hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 910 911 /* re-initialize the hclge device */ 912 ret = hclgevf_init_hdev(hdev); 913 if (ret) { 914 dev_err(&hdev->pdev->dev, 915 "hclge device re-init failed, VF is disabled!\n"); 916 return ret; 917 } 918 919 /* bring up the nic client again */ 920 hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 921 922 return 0; 923 } 924 925 static int hclgevf_reset(struct hclgevf_dev *hdev) 926 { 927 int ret; 928 929 rtnl_lock(); 930 931 /* bring down the nic to stop any ongoing TX/RX */ 932 hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 933 934 /* check if VF could successfully fetch the hardware reset completion 935 * status from the hardware 936 */ 937 ret = hclgevf_reset_wait(hdev); 938 if (ret) { 939 /* can't do much in this situation, will disable VF */ 940 dev_err(&hdev->pdev->dev, 941 "VF failed(=%d) to fetch H/W reset completion status\n", 942 ret); 943 944 dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); 945 hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 946 947 rtnl_unlock(); 948 return ret; 949 } 950 951 /* now, re-initialize the nic client and ae device*/ 952 ret = hclgevf_reset_stack(hdev); 953 if (ret) 954 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 955 956 /* bring up the nic to enable TX/RX again */ 957 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 958 959 rtnl_unlock(); 960 961 return ret; 962 } 963 964 static int hclgevf_do_reset(struct hclgevf_dev *hdev) 965 { 966 int status; 967 u8 respmsg; 968 969 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 970 0, false, &respmsg, sizeof(u8)); 971 if (status) 972 dev_err(&hdev->pdev->dev, 973 "VF reset request to PF failed(=%d)\n", status); 974 975 return status; 976 } 977 978 static void hclgevf_reset_event(struct hnae3_handle *handle) 979 { 980 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 981 982 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 983 984 handle->reset_level = HNAE3_VF_RESET; 985 986 /* reset of this VF requested */ 987 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 988 hclgevf_reset_task_schedule(hdev); 989 990 handle->last_reset_time = jiffies; 991 } 992 993 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 994 { 995 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 996 997 return hdev->fw_version; 998 } 999 1000 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1001 { 1002 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1003 1004 vector->vector_irq = pci_irq_vector(hdev->pdev, 1005 HCLGEVF_MISC_VECTOR_NUM); 1006 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1007 /* vector status always valid for Vector 0 */ 1008 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1009 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1010 1011 hdev->num_msi_left -= 1; 1012 hdev->num_msi_used += 1; 1013 } 1014 1015 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1016 { 1017 if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && 1018 !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { 1019 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1020 schedule_work(&hdev->rst_service_task); 1021 } 1022 } 1023 1024 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1025 { 1026 if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && 1027 !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { 1028 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1029 schedule_work(&hdev->mbx_service_task); 1030 } 1031 } 1032 1033 static void hclgevf_task_schedule(struct hclgevf_dev *hdev) 1034 { 1035 if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && 1036 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) 1037 schedule_work(&hdev->service_task); 1038 } 1039 1040 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) 1041 { 1042 /* if we have any pending mailbox event then schedule the mbx task */ 1043 if (hdev->mbx_event_pending) 1044 hclgevf_mbx_task_schedule(hdev); 1045 1046 if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) 1047 hclgevf_reset_task_schedule(hdev); 1048 } 1049 1050 static void hclgevf_service_timer(struct timer_list *t) 1051 { 1052 struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); 1053 1054 mod_timer(&hdev->service_timer, jiffies + 5 * HZ); 1055 1056 hclgevf_task_schedule(hdev); 1057 } 1058 1059 static void hclgevf_reset_service_task(struct work_struct *work) 1060 { 1061 struct hclgevf_dev *hdev = 1062 container_of(work, struct hclgevf_dev, rst_service_task); 1063 int ret; 1064 1065 if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1066 return; 1067 1068 clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1069 1070 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 1071 &hdev->reset_state)) { 1072 /* PF has initmated that it is about to reset the hardware. 1073 * We now have to poll & check if harware has actually completed 1074 * the reset sequence. On hardware reset completion, VF needs to 1075 * reset the client and ae device. 1076 */ 1077 hdev->reset_attempts = 0; 1078 1079 ret = hclgevf_reset(hdev); 1080 if (ret) 1081 dev_err(&hdev->pdev->dev, "VF stack reset failed.\n"); 1082 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1083 &hdev->reset_state)) { 1084 /* we could be here when either of below happens: 1085 * 1. reset was initiated due to watchdog timeout due to 1086 * a. IMP was earlier reset and our TX got choked down and 1087 * which resulted in watchdog reacting and inducing VF 1088 * reset. This also means our cmdq would be unreliable. 1089 * b. problem in TX due to other lower layer(example link 1090 * layer not functioning properly etc.) 1091 * 2. VF reset might have been initiated due to some config 1092 * change. 1093 * 1094 * NOTE: Theres no clear way to detect above cases than to react 1095 * to the response of PF for this reset request. PF will ack the 1096 * 1b and 2. cases but we will not get any intimation about 1a 1097 * from PF as cmdq would be in unreliable state i.e. mailbox 1098 * communication between PF and VF would be broken. 1099 */ 1100 1101 /* if we are never geting into pending state it means either: 1102 * 1. PF is not receiving our request which could be due to IMP 1103 * reset 1104 * 2. PF is screwed 1105 * We cannot do much for 2. but to check first we can try reset 1106 * our PCIe + stack and see if it alleviates the problem. 1107 */ 1108 if (hdev->reset_attempts > 3) { 1109 /* prepare for full reset of stack + pcie interface */ 1110 hdev->nic.reset_level = HNAE3_VF_FULL_RESET; 1111 1112 /* "defer" schedule the reset task again */ 1113 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1114 } else { 1115 hdev->reset_attempts++; 1116 1117 /* request PF for resetting this VF via mailbox */ 1118 ret = hclgevf_do_reset(hdev); 1119 if (ret) 1120 dev_warn(&hdev->pdev->dev, 1121 "VF rst fail, stack will call\n"); 1122 } 1123 } 1124 1125 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1126 } 1127 1128 static void hclgevf_mailbox_service_task(struct work_struct *work) 1129 { 1130 struct hclgevf_dev *hdev; 1131 1132 hdev = container_of(work, struct hclgevf_dev, mbx_service_task); 1133 1134 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1135 return; 1136 1137 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1138 1139 hclgevf_mbx_async_handler(hdev); 1140 1141 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1142 } 1143 1144 static void hclgevf_service_task(struct work_struct *work) 1145 { 1146 struct hclgevf_dev *hdev; 1147 1148 hdev = container_of(work, struct hclgevf_dev, service_task); 1149 1150 /* request the link status from the PF. PF would be able to tell VF 1151 * about such updates in future so we might remove this later 1152 */ 1153 hclgevf_request_link_info(hdev); 1154 1155 hclgevf_deferred_task_schedule(hdev); 1156 1157 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1158 } 1159 1160 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 1161 { 1162 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 1163 } 1164 1165 static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval) 1166 { 1167 u32 cmdq_src_reg; 1168 1169 /* fetch the events from their corresponding regs */ 1170 cmdq_src_reg = hclgevf_read_dev(&hdev->hw, 1171 HCLGEVF_VECTOR0_CMDQ_SRC_REG); 1172 1173 /* check for vector0 mailbox(=CMDQ RX) event source */ 1174 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 1175 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 1176 *clearval = cmdq_src_reg; 1177 return true; 1178 } 1179 1180 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); 1181 1182 return false; 1183 } 1184 1185 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1186 { 1187 writel(en ? 1 : 0, vector->addr); 1188 } 1189 1190 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 1191 { 1192 struct hclgevf_dev *hdev = data; 1193 u32 clearval; 1194 1195 hclgevf_enable_vector(&hdev->misc_vector, false); 1196 if (!hclgevf_check_event_cause(hdev, &clearval)) 1197 goto skip_sched; 1198 1199 hclgevf_mbx_handler(hdev); 1200 1201 hclgevf_clear_event_cause(hdev, clearval); 1202 1203 skip_sched: 1204 hclgevf_enable_vector(&hdev->misc_vector, true); 1205 1206 return IRQ_HANDLED; 1207 } 1208 1209 static int hclgevf_configure(struct hclgevf_dev *hdev) 1210 { 1211 int ret; 1212 1213 /* get queue configuration from PF */ 1214 ret = hclge_get_queue_info(hdev); 1215 if (ret) 1216 return ret; 1217 /* get tc configuration from PF */ 1218 return hclgevf_get_tc_info(hdev); 1219 } 1220 1221 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 1222 { 1223 struct pci_dev *pdev = ae_dev->pdev; 1224 struct hclgevf_dev *hdev = ae_dev->priv; 1225 1226 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 1227 if (!hdev) 1228 return -ENOMEM; 1229 1230 hdev->pdev = pdev; 1231 hdev->ae_dev = ae_dev; 1232 ae_dev->priv = hdev; 1233 1234 return 0; 1235 } 1236 1237 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 1238 { 1239 struct hnae3_handle *roce = &hdev->roce; 1240 struct hnae3_handle *nic = &hdev->nic; 1241 1242 roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM; 1243 1244 if (hdev->num_msi_left < roce->rinfo.num_vectors || 1245 hdev->num_msi_left == 0) 1246 return -EINVAL; 1247 1248 roce->rinfo.base_vector = 1249 hdev->vector_status[hdev->num_msi_used]; 1250 1251 roce->rinfo.netdev = nic->kinfo.netdev; 1252 roce->rinfo.roce_io_base = hdev->hw.io_base; 1253 1254 roce->pdev = nic->pdev; 1255 roce->ae_algo = nic->ae_algo; 1256 roce->numa_node_mask = nic->numa_node_mask; 1257 1258 return 0; 1259 } 1260 1261 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 1262 { 1263 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1264 int i, ret; 1265 1266 rss_cfg->rss_size = hdev->rss_size_max; 1267 1268 /* Initialize RSS indirect table for each vport */ 1269 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 1270 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; 1271 1272 ret = hclgevf_set_rss_indir_table(hdev); 1273 if (ret) 1274 return ret; 1275 1276 return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); 1277 } 1278 1279 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 1280 { 1281 /* other vlan config(like, VLAN TX/RX offload) would also be added 1282 * here later 1283 */ 1284 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 1285 false); 1286 } 1287 1288 static int hclgevf_ae_start(struct hnae3_handle *handle) 1289 { 1290 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1291 int i, queue_id; 1292 1293 for (i = 0; i < handle->kinfo.num_tqps; i++) { 1294 /* ring enable */ 1295 queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); 1296 if (queue_id < 0) { 1297 dev_warn(&hdev->pdev->dev, 1298 "Get invalid queue id, ignore it\n"); 1299 continue; 1300 } 1301 1302 hclgevf_tqp_enable(hdev, queue_id, 0, true); 1303 } 1304 1305 /* reset tqp stats */ 1306 hclgevf_reset_tqp_stats(handle); 1307 1308 hclgevf_request_link_info(hdev); 1309 1310 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1311 mod_timer(&hdev->service_timer, jiffies + HZ); 1312 1313 return 0; 1314 } 1315 1316 static void hclgevf_ae_stop(struct hnae3_handle *handle) 1317 { 1318 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1319 int i, queue_id; 1320 1321 for (i = 0; i < hdev->num_tqps; i++) { 1322 /* Ring disable */ 1323 queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); 1324 if (queue_id < 0) { 1325 dev_warn(&hdev->pdev->dev, 1326 "Get invalid queue id, ignore it\n"); 1327 continue; 1328 } 1329 1330 hclgevf_tqp_enable(hdev, queue_id, 0, false); 1331 } 1332 1333 /* reset tqp stats */ 1334 hclgevf_reset_tqp_stats(handle); 1335 del_timer_sync(&hdev->service_timer); 1336 cancel_work_sync(&hdev->service_task); 1337 hclgevf_update_link_status(hdev, 0); 1338 } 1339 1340 static void hclgevf_state_init(struct hclgevf_dev *hdev) 1341 { 1342 /* if this is on going reset then skip this initialization */ 1343 if (hclgevf_dev_ongoing_reset(hdev)) 1344 return; 1345 1346 /* setup tasks for the MBX */ 1347 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); 1348 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1349 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1350 1351 /* setup tasks for service timer */ 1352 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); 1353 1354 INIT_WORK(&hdev->service_task, hclgevf_service_task); 1355 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1356 1357 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); 1358 1359 mutex_init(&hdev->mbx_resp.mbx_mutex); 1360 1361 /* bring the device down */ 1362 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1363 } 1364 1365 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 1366 { 1367 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1368 1369 if (hdev->service_timer.function) 1370 del_timer_sync(&hdev->service_timer); 1371 if (hdev->service_task.func) 1372 cancel_work_sync(&hdev->service_task); 1373 if (hdev->mbx_service_task.func) 1374 cancel_work_sync(&hdev->mbx_service_task); 1375 if (hdev->rst_service_task.func) 1376 cancel_work_sync(&hdev->rst_service_task); 1377 1378 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 1379 } 1380 1381 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 1382 { 1383 struct pci_dev *pdev = hdev->pdev; 1384 int vectors; 1385 int i; 1386 1387 /* if this is on going reset then skip this initialization */ 1388 if (hclgevf_dev_ongoing_reset(hdev)) 1389 return 0; 1390 1391 hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM; 1392 1393 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 1394 PCI_IRQ_MSI | PCI_IRQ_MSIX); 1395 if (vectors < 0) { 1396 dev_err(&pdev->dev, 1397 "failed(%d) to allocate MSI/MSI-X vectors\n", 1398 vectors); 1399 return vectors; 1400 } 1401 if (vectors < hdev->num_msi) 1402 dev_warn(&hdev->pdev->dev, 1403 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 1404 hdev->num_msi, vectors); 1405 1406 hdev->num_msi = vectors; 1407 hdev->num_msi_left = vectors; 1408 hdev->base_msi_vector = pdev->irq; 1409 1410 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 1411 sizeof(u16), GFP_KERNEL); 1412 if (!hdev->vector_status) { 1413 pci_free_irq_vectors(pdev); 1414 return -ENOMEM; 1415 } 1416 1417 for (i = 0; i < hdev->num_msi; i++) 1418 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 1419 1420 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 1421 sizeof(int), GFP_KERNEL); 1422 if (!hdev->vector_irq) { 1423 pci_free_irq_vectors(pdev); 1424 return -ENOMEM; 1425 } 1426 1427 return 0; 1428 } 1429 1430 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 1431 { 1432 struct pci_dev *pdev = hdev->pdev; 1433 1434 pci_free_irq_vectors(pdev); 1435 } 1436 1437 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 1438 { 1439 int ret = 0; 1440 1441 /* if this is on going reset then skip this initialization */ 1442 if (hclgevf_dev_ongoing_reset(hdev)) 1443 return 0; 1444 1445 hclgevf_get_misc_vector(hdev); 1446 1447 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 1448 0, "hclgevf_cmd", hdev); 1449 if (ret) { 1450 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 1451 hdev->misc_vector.vector_irq); 1452 return ret; 1453 } 1454 1455 /* enable misc. vector(vector 0) */ 1456 hclgevf_enable_vector(&hdev->misc_vector, true); 1457 1458 return ret; 1459 } 1460 1461 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 1462 { 1463 /* disable misc vector(vector 0) */ 1464 hclgevf_enable_vector(&hdev->misc_vector, false); 1465 free_irq(hdev->misc_vector.vector_irq, hdev); 1466 hclgevf_free_vector(hdev, 0); 1467 } 1468 1469 static int hclgevf_init_instance(struct hclgevf_dev *hdev, 1470 struct hnae3_client *client) 1471 { 1472 int ret; 1473 1474 switch (client->type) { 1475 case HNAE3_CLIENT_KNIC: 1476 hdev->nic_client = client; 1477 hdev->nic.client = client; 1478 1479 ret = client->ops->init_instance(&hdev->nic); 1480 if (ret) 1481 return ret; 1482 1483 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { 1484 struct hnae3_client *rc = hdev->roce_client; 1485 1486 ret = hclgevf_init_roce_base_info(hdev); 1487 if (ret) 1488 return ret; 1489 ret = rc->ops->init_instance(&hdev->roce); 1490 if (ret) 1491 return ret; 1492 } 1493 break; 1494 case HNAE3_CLIENT_UNIC: 1495 hdev->nic_client = client; 1496 hdev->nic.client = client; 1497 1498 ret = client->ops->init_instance(&hdev->nic); 1499 if (ret) 1500 return ret; 1501 break; 1502 case HNAE3_CLIENT_ROCE: 1503 hdev->roce_client = client; 1504 hdev->roce.client = client; 1505 1506 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { 1507 ret = hclgevf_init_roce_base_info(hdev); 1508 if (ret) 1509 return ret; 1510 1511 ret = client->ops->init_instance(&hdev->roce); 1512 if (ret) 1513 return ret; 1514 } 1515 } 1516 1517 return 0; 1518 } 1519 1520 static void hclgevf_uninit_instance(struct hclgevf_dev *hdev, 1521 struct hnae3_client *client) 1522 { 1523 /* un-init roce, if it exists */ 1524 if (hdev->roce_client) 1525 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 1526 1527 /* un-init nic/unic, if this was not called by roce client */ 1528 if ((client->ops->uninit_instance) && 1529 (client->type != HNAE3_CLIENT_ROCE)) 1530 client->ops->uninit_instance(&hdev->nic, 0); 1531 } 1532 1533 static int hclgevf_register_client(struct hnae3_client *client, 1534 struct hnae3_ae_dev *ae_dev) 1535 { 1536 struct hclgevf_dev *hdev = ae_dev->priv; 1537 1538 return hclgevf_init_instance(hdev, client); 1539 } 1540 1541 static void hclgevf_unregister_client(struct hnae3_client *client, 1542 struct hnae3_ae_dev *ae_dev) 1543 { 1544 struct hclgevf_dev *hdev = ae_dev->priv; 1545 1546 hclgevf_uninit_instance(hdev, client); 1547 } 1548 1549 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 1550 { 1551 struct pci_dev *pdev = hdev->pdev; 1552 struct hclgevf_hw *hw; 1553 int ret; 1554 1555 /* check if we need to skip initialization of pci. This will happen if 1556 * device is undergoing VF reset. Otherwise, we would need to 1557 * re-initialize pci interface again i.e. when device is not going 1558 * through *any* reset or actually undergoing full reset. 1559 */ 1560 if (hclgevf_dev_ongoing_reset(hdev)) 1561 return 0; 1562 1563 ret = pci_enable_device(pdev); 1564 if (ret) { 1565 dev_err(&pdev->dev, "failed to enable PCI device\n"); 1566 return ret; 1567 } 1568 1569 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 1570 if (ret) { 1571 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 1572 goto err_disable_device; 1573 } 1574 1575 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 1576 if (ret) { 1577 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 1578 goto err_disable_device; 1579 } 1580 1581 pci_set_master(pdev); 1582 hw = &hdev->hw; 1583 hw->hdev = hdev; 1584 hw->io_base = pci_iomap(pdev, 2, 0); 1585 if (!hw->io_base) { 1586 dev_err(&pdev->dev, "can't map configuration register space\n"); 1587 ret = -ENOMEM; 1588 goto err_clr_master; 1589 } 1590 1591 return 0; 1592 1593 err_clr_master: 1594 pci_clear_master(pdev); 1595 pci_release_regions(pdev); 1596 err_disable_device: 1597 pci_disable_device(pdev); 1598 1599 return ret; 1600 } 1601 1602 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 1603 { 1604 struct pci_dev *pdev = hdev->pdev; 1605 1606 pci_iounmap(pdev, hdev->hw.io_base); 1607 pci_clear_master(pdev); 1608 pci_release_regions(pdev); 1609 pci_disable_device(pdev); 1610 } 1611 1612 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 1613 { 1614 struct pci_dev *pdev = hdev->pdev; 1615 int ret; 1616 1617 /* check if device is on-going full reset(i.e. pcie as well) */ 1618 if (hclgevf_dev_ongoing_full_reset(hdev)) { 1619 dev_warn(&pdev->dev, "device is going full reset\n"); 1620 hclgevf_uninit_hdev(hdev); 1621 } 1622 1623 ret = hclgevf_pci_init(hdev); 1624 if (ret) { 1625 dev_err(&pdev->dev, "PCI initialization failed\n"); 1626 return ret; 1627 } 1628 1629 ret = hclgevf_init_msi(hdev); 1630 if (ret) { 1631 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 1632 goto err_irq_init; 1633 } 1634 1635 hclgevf_state_init(hdev); 1636 1637 ret = hclgevf_cmd_init(hdev); 1638 if (ret) 1639 goto err_cmd_init; 1640 1641 ret = hclgevf_misc_irq_init(hdev); 1642 if (ret) { 1643 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 1644 ret); 1645 goto err_misc_irq_init; 1646 } 1647 1648 ret = hclgevf_configure(hdev); 1649 if (ret) { 1650 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 1651 goto err_config; 1652 } 1653 1654 ret = hclgevf_alloc_tqps(hdev); 1655 if (ret) { 1656 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 1657 goto err_config; 1658 } 1659 1660 ret = hclgevf_set_handle_info(hdev); 1661 if (ret) { 1662 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); 1663 goto err_config; 1664 } 1665 1666 /* Initialize VF's MTA */ 1667 hdev->accept_mta_mc = true; 1668 ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc); 1669 if (ret) { 1670 dev_err(&hdev->pdev->dev, 1671 "failed(%d) to set mta filter mode\n", ret); 1672 goto err_config; 1673 } 1674 1675 /* Initialize RSS for this VF */ 1676 ret = hclgevf_rss_init_hw(hdev); 1677 if (ret) { 1678 dev_err(&hdev->pdev->dev, 1679 "failed(%d) to initialize RSS\n", ret); 1680 goto err_config; 1681 } 1682 1683 ret = hclgevf_init_vlan_config(hdev); 1684 if (ret) { 1685 dev_err(&hdev->pdev->dev, 1686 "failed(%d) to initialize VLAN config\n", ret); 1687 goto err_config; 1688 } 1689 1690 pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); 1691 1692 return 0; 1693 1694 err_config: 1695 hclgevf_misc_irq_uninit(hdev); 1696 err_misc_irq_init: 1697 hclgevf_cmd_uninit(hdev); 1698 err_cmd_init: 1699 hclgevf_state_uninit(hdev); 1700 hclgevf_uninit_msi(hdev); 1701 err_irq_init: 1702 hclgevf_pci_uninit(hdev); 1703 return ret; 1704 } 1705 1706 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 1707 { 1708 hclgevf_state_uninit(hdev); 1709 hclgevf_misc_irq_uninit(hdev); 1710 hclgevf_cmd_uninit(hdev); 1711 hclgevf_uninit_msi(hdev); 1712 hclgevf_pci_uninit(hdev); 1713 } 1714 1715 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 1716 { 1717 struct pci_dev *pdev = ae_dev->pdev; 1718 int ret; 1719 1720 ret = hclgevf_alloc_hdev(ae_dev); 1721 if (ret) { 1722 dev_err(&pdev->dev, "hclge device allocation failed\n"); 1723 return ret; 1724 } 1725 1726 ret = hclgevf_init_hdev(ae_dev->priv); 1727 if (ret) 1728 dev_err(&pdev->dev, "hclge device initialization failed\n"); 1729 1730 return ret; 1731 } 1732 1733 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 1734 { 1735 struct hclgevf_dev *hdev = ae_dev->priv; 1736 1737 hclgevf_uninit_hdev(hdev); 1738 ae_dev->priv = NULL; 1739 } 1740 1741 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 1742 { 1743 struct hnae3_handle *nic = &hdev->nic; 1744 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 1745 1746 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 1747 } 1748 1749 /** 1750 * hclgevf_get_channels - Get the current channels enabled and max supported. 1751 * @handle: hardware information for network interface 1752 * @ch: ethtool channels structure 1753 * 1754 * We don't support separate tx and rx queues as channels. The other count 1755 * represents how many queues are being used for control. max_combined counts 1756 * how many queue pairs we can support. They may not be mapped 1 to 1 with 1757 * q_vectors since we support a lot more queue pairs than q_vectors. 1758 **/ 1759 static void hclgevf_get_channels(struct hnae3_handle *handle, 1760 struct ethtool_channels *ch) 1761 { 1762 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1763 1764 ch->max_combined = hclgevf_get_max_channels(hdev); 1765 ch->other_count = 0; 1766 ch->max_other = 0; 1767 ch->combined_count = hdev->num_tqps; 1768 } 1769 1770 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 1771 u16 *free_tqps, u16 *max_rss_size) 1772 { 1773 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1774 1775 *free_tqps = 0; 1776 *max_rss_size = hdev->rss_size_max; 1777 } 1778 1779 static int hclgevf_get_status(struct hnae3_handle *handle) 1780 { 1781 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1782 1783 return hdev->hw.mac.link; 1784 } 1785 1786 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 1787 u8 *auto_neg, u32 *speed, 1788 u8 *duplex) 1789 { 1790 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1791 1792 if (speed) 1793 *speed = hdev->hw.mac.speed; 1794 if (duplex) 1795 *duplex = hdev->hw.mac.duplex; 1796 if (auto_neg) 1797 *auto_neg = AUTONEG_DISABLE; 1798 } 1799 1800 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 1801 u8 duplex) 1802 { 1803 hdev->hw.mac.speed = speed; 1804 hdev->hw.mac.duplex = duplex; 1805 } 1806 1807 static const struct hnae3_ae_ops hclgevf_ops = { 1808 .init_ae_dev = hclgevf_init_ae_dev, 1809 .uninit_ae_dev = hclgevf_uninit_ae_dev, 1810 .init_client_instance = hclgevf_register_client, 1811 .uninit_client_instance = hclgevf_unregister_client, 1812 .start = hclgevf_ae_start, 1813 .stop = hclgevf_ae_stop, 1814 .map_ring_to_vector = hclgevf_map_ring_to_vector, 1815 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 1816 .get_vector = hclgevf_get_vector, 1817 .put_vector = hclgevf_put_vector, 1818 .reset_queue = hclgevf_reset_tqp, 1819 .set_promisc_mode = hclgevf_set_promisc_mode, 1820 .get_mac_addr = hclgevf_get_mac_addr, 1821 .set_mac_addr = hclgevf_set_mac_addr, 1822 .add_uc_addr = hclgevf_add_uc_addr, 1823 .rm_uc_addr = hclgevf_rm_uc_addr, 1824 .add_mc_addr = hclgevf_add_mc_addr, 1825 .rm_mc_addr = hclgevf_rm_mc_addr, 1826 .get_stats = hclgevf_get_stats, 1827 .update_stats = hclgevf_update_stats, 1828 .get_strings = hclgevf_get_strings, 1829 .get_sset_count = hclgevf_get_sset_count, 1830 .get_rss_key_size = hclgevf_get_rss_key_size, 1831 .get_rss_indir_size = hclgevf_get_rss_indir_size, 1832 .get_rss = hclgevf_get_rss, 1833 .set_rss = hclgevf_set_rss, 1834 .get_tc_size = hclgevf_get_tc_size, 1835 .get_fw_version = hclgevf_get_fw_version, 1836 .set_vlan_filter = hclgevf_set_vlan_filter, 1837 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 1838 .reset_event = hclgevf_reset_event, 1839 .get_channels = hclgevf_get_channels, 1840 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 1841 .get_status = hclgevf_get_status, 1842 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 1843 }; 1844 1845 static struct hnae3_ae_algo ae_algovf = { 1846 .ops = &hclgevf_ops, 1847 .name = HCLGEVF_NAME, 1848 .pdev_id_table = ae_algovf_pci_tbl, 1849 }; 1850 1851 static int hclgevf_init(void) 1852 { 1853 pr_info("%s is initializing\n", HCLGEVF_NAME); 1854 1855 hnae3_register_ae_algo(&ae_algovf); 1856 1857 return 0; 1858 } 1859 1860 static void hclgevf_exit(void) 1861 { 1862 hnae3_unregister_ae_algo(&ae_algovf); 1863 } 1864 module_init(hclgevf_init); 1865 module_exit(hclgevf_exit); 1866 1867 MODULE_LICENSE("GPL"); 1868 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 1869 MODULE_DESCRIPTION("HCLGEVF Driver"); 1870 MODULE_VERSION(HCLGEVF_MOD_VERSION); 1871