// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}
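/* Per-TQP statistics are fetched from firmware one queue at a time: for each
 * queue a HCLGEVF_OPC_QUERY_RX_STATUS and a HCLGEVF_OPC_QUERY_TX_STATUS
 * descriptor is sent with the queue index (low 9 bits) in data[0], and the
 * packet count returned in data[1] is accumulated into the shadow tqp_stats.
 */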
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	/* the 8-byte response packs four u16 fields */
	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	/* if this is an ongoing reset then we need to re-allocate the TQPs,
	 * since we cannot assume we would get the same number of TQPs back
	 * from the PF
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, hdev->htqp);

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
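/* Derive the knic queue layout from what the PF granted: num_tc is the
 * number of bits set in hw_tc_map, rss_size is capped by both rss_size_max
 * and an even split of the TQPs across TCs, and num_tqps is clamped so that
 * rss_size * num_tc never exceeds the TQPs actually allocated.
 */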
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	/* if this is an ongoing reset then we need to re-allocate the hnae
	 * queues as well, since the number of TQPs from the PF might have
	 * changed.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, kinfo->tqp);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
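/* The RSS hash key is programmed across three HCLGEVF_OPC_RSS_GENERIC_CONFIG
 * descriptors: the first two carry HCLGEVF_RSS_HASH_KEY_NUM bytes each and
 * the last carries the remainder (HCLGEVF_RSS_KEY_SIZE -
 * 2 * HCLGEVF_RSS_HASH_KEY_NUM bytes). hash_config packs the hash algorithm
 * in its low bits and the key-chunk offset at HCLGEVF_RSS_HASH_KEY_OFFSET_B.
 */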
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}
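/* On revision 0x21 and later hardware the VF owns its hash algorithm and
 * hash key, so get/set_rss expose both; on older revisions only the
 * indirection table is read or written here.
 */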
static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS key if requested by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
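/* A ring-to-vector (un)map request is sent to the PF over the VF-to-PF
 * mailbox: msg[0] is the map/unmap opcode, msg[1] the vector id and msg[2]
 * the number of rings carried, followed by one
 * (ring type, tqp index, GL index) tuple per ring starting at
 * HCLGE_MBX_RING_MAP_BASIC_MSG_NUM. When a descriptor fills up it is sent
 * and a fresh one is started with the same header.
 */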
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
					HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}
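/* Promiscuous mode is configured through the PF: msg[1] and msg[2] of the
 * VF-to-PF mailbox command carry the unicast and multicast enable flags.
 */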
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				     bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
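/* A MAC address change is requested from the PF with both addresses in one
 * message: the new MAC in the first ETH_ALEN bytes and the old one in the
 * next. On the first call (is_first) there is no old address to revoke, so
 * UC_ADD is used instead of UC_MODIFY; the shadow copy is only updated once
 * the PF acks the request.
 */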
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}
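/* The VLAN filter message is 5 bytes: msg_data[0] carries the is_kill flag,
 * bytes 1-2 the VLAN id and bytes 3-4 the protocol. Only 802.1Q and VLAN
 * ids up to 4095 are accepted.
 */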
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return;

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
			     2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	return client->ops->reset_notify(handle, type);
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS	500
#define HCLGEVF_RESET_WAIT_CNT	20
	u32 val, cnt = 0;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
	while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
		msleep(HCLGEVF_RESET_WAIT_MS);
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
		cnt++;
	}

	/* hardware completion status should be available by this time */
	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "couldn't get reset done status from h/w, timeout!\n");
		return -EBUSY;
	}

	/* wait a bit more for the stack reset to complete. This can happen
	 * when the reset was asserted by the PF. Yes, it also means we might
	 * end up waiting a bit longer even for a plain VF reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

	/* re-initialize the hclge device */
	ret = hclgevf_init_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

	return 0;
}
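/* VF reset sequence: bring the nic client down to quiesce TX/RX, poll
 * HCLGEVF_FUN_RST_ING until the hardware reports reset completion
 * (hclgevf_reset_wait), re-initialize the hclge device and nic client
 * (hclgevf_reset_stack), then bring the client back up. The whole sequence
 * runs under rtnl_lock.
 */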
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	int ret;

	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);

		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

		rtnl_unlock();
		return ret;
	}

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

	/* bring up the nic to enable TX/RX again */
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

	rtnl_unlock();

	return ret;
}

static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
	int status;
	u8 respmsg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
				      0, false, &respmsg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF reset request to PF failed(=%d)\n", status);

	return status;
}

static void hclgevf_reset_event(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	handle->reset_level = HNAE3_VF_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	handle->last_reset_time = jiffies;
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}
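/* The reset and mailbox tasks are each guarded by a pair of state bits: the
 * *_SERVICE_SCHED bit marks a task already queued and the *_HANDLING bit
 * marks one currently running, so a new task is only scheduled when neither
 * is set. The task clears the SCHED bit when it starts and the HANDLING bit
 * when it finishes.
 */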
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		ret = hclgevf_reset(hdev);
		if (ret)
			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. problem in TX due to another lower layer (for example,
		 *       the link layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the PF's response to this reset request.
		 * PF will ack cases 1b and 2, but we will not get any
		 * intimation about 1a from PF as cmdq would be in an
		 * unreliable state, i.e. mailbox communication between PF and
		 * VF would be broken.
		 */

		/* if we never get into the pending state it means either:
		 * 1. PF is not receiving our request, which could be due to
		 *    IMP reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try to
		 * reset our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			/* request PF for resetting this VF via mailbox */
			ret = hclgevf_do_reset(hdev);
			if (ret)
				dev_warn(&hdev->pdev->dev,
					 "VF rst fail, stack will call\n");
		}
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}
static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}
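/* Vector 0 handler: mask the misc vector, check whether the event source was
 * the CMDQ RX (mailbox) interrupt, run the mailbox handler, acknowledge the
 * event by writing the clear value back, then unmask the vector.
 */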
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	hclgevf_mbx_handler(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev = ae_dev->priv;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
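/* Default RSS setup: on revision 0x21+ a random Toeplitz key is generated
 * and pushed to hardware; the indirection table is filled round-robin over
 * rss_size_max, then the per-TC size/offset mode is programmed.
 */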
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
		netdev_rss_key_fill(rss_cfg->rss_hash_key,
				    HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < kinfo->num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	for (i = 0; i < kinfo->num_tqps; i++) {
		/* Ring disable */
		queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return;

	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}
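/* Interrupt allocation: when RoCE is supported only MSI-X may be used and at
 * least roce_base_msix_offset + 1 vectors must be granted, since the RoCE
 * vectors sit at a fixed offset after the NIC ones; otherwise plain MSI is
 * acceptable as a fallback.
 */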
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}
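/* The RoCE client is torn down first since it borrows the nic handle's
 * netdev and I/O base; the nic/unic instance itself is only uninitialized
 * when the caller is not the RoCE client.
 */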
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	/* skip PCI initialization if the device is undergoing VF reset; the
	 * PCI interface only needs to be (re)initialized when the device is
	 * not in any reset, or is actually undergoing a full reset.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and RoCE vectors, NIC vectors
		 * are queued before RoCE vectors. The offset is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}
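/* Bring-up order for the VF device: PCI -> command queue -> resource query
 * -> MSI/MSI-X -> state/tasks -> misc (vector 0) IRQ -> PF-provided config
 * -> TQPs -> handle -> RSS -> VLAN. Several of these steps detect an ongoing
 * VF reset and turn into no-ops, so the same path can be reused for
 * re-initialization.
 */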
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	/* check if device is undergoing a full reset (i.e. pcie as well) */
	if (hclgevf_dev_ongoing_full_reset(hdev)) {
		dev_warn(&pdev->dev, "device is undergoing a full reset\n");
		hclgevf_uninit_hdev(hdev);
	}

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_query_vf;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_query_vf;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_query_vf:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);