1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <net/rtnetlink.h> 6 #include "hclgevf_cmd.h" 7 #include "hclgevf_main.h" 8 #include "hclge_mbx.h" 9 #include "hnae3.h" 10 11 #define HCLGEVF_NAME "hclgevf" 12 13 static int hclgevf_init_hdev(struct hclgevf_dev *hdev); 14 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev); 15 static struct hnae3_ae_algo ae_algovf; 16 17 static const struct pci_device_id ae_algovf_pci_tbl[] = { 18 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 19 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 20 /* required last entry */ 21 {0, } 22 }; 23 24 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 25 26 static inline struct hclgevf_dev *hclgevf_ae_get_hdev( 27 struct hnae3_handle *handle) 28 { 29 return container_of(handle, struct hclgevf_dev, nic); 30 } 31 32 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 33 { 34 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 35 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 36 struct hclgevf_desc desc; 37 struct hclgevf_tqp *tqp; 38 int status; 39 int i; 40 41 for (i = 0; i < kinfo->num_tqps; i++) { 42 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 43 hclgevf_cmd_setup_basic_desc(&desc, 44 HCLGEVF_OPC_QUERY_RX_STATUS, 45 true); 46 47 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 48 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 49 if (status) { 50 dev_err(&hdev->pdev->dev, 51 "Query tqp stat fail, status = %d,queue = %d\n", 52 status, i); 53 return status; 54 } 55 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 56 le32_to_cpu(desc.data[1]); 57 58 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 59 true); 60 61 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 62 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 63 if (status) { 64 dev_err(&hdev->pdev->dev, 65 "Query tqp stat fail, status = %d,queue = %d\n", 66 status, i); 67 return status; 68 } 69 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 70 le32_to_cpu(desc.data[1]); 71 } 72 73 return 0; 74 } 75 76 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 77 { 78 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 79 struct hclgevf_tqp *tqp; 80 u64 *buff = data; 81 int i; 82 83 for (i = 0; i < kinfo->num_tqps; i++) { 84 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 85 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 86 } 87 for (i = 0; i < kinfo->num_tqps; i++) { 88 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 89 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 90 } 91 92 return buff; 93 } 94 95 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 96 { 97 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 98 99 return kinfo->num_tqps * 2; 100 } 101 102 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 103 { 104 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 105 u8 *buff = data; 106 int i = 0; 107 108 for (i = 0; i < kinfo->num_tqps; i++) { 109 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 110 struct hclgevf_tqp, q); 111 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 112 tqp->index); 113 buff += ETH_GSTRING_LEN; 114 } 115 116 for (i = 0; i < kinfo->num_tqps; i++) { 117 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 118 struct hclgevf_tqp, q); 119 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 120 tqp->index); 121 buff += ETH_GSTRING_LEN; 122 } 123 124 return buff; 125 } 126 127 static void 
hclgevf_update_stats(struct hnae3_handle *handle, 128 struct net_device_stats *net_stats) 129 { 130 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 131 int status; 132 133 status = hclgevf_tqps_update_stats(handle); 134 if (status) 135 dev_err(&hdev->pdev->dev, 136 "VF update of TQP stats failed, status = %d.\n", 137 status); 138 } 139 140 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 141 { 142 if (strset == ETH_SS_TEST) 143 return -EOPNOTSUPP; 144 else if (strset == ETH_SS_STATS) 145 return hclgevf_tqps_get_sset_count(handle, strset); 146 147 return 0; 148 } 149 150 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 151 u8 *data) 152 { 153 u8 *p = (char *)data; 154 155 if (strset == ETH_SS_STATS) 156 p = hclgevf_tqps_get_strings(handle, p); 157 } 158 159 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 160 { 161 hclgevf_tqps_get_stats(handle, data); 162 } 163 164 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 165 { 166 u8 resp_msg; 167 int status; 168 169 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 170 true, &resp_msg, sizeof(u8)); 171 if (status) { 172 dev_err(&hdev->pdev->dev, 173 "VF request to get TC info from PF failed %d", 174 status); 175 return status; 176 } 177 178 hdev->hw_tc_map = resp_msg; 179 180 return 0; 181 } 182 183 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 184 { 185 #define HCLGEVF_TQPS_RSS_INFO_LEN 8 186 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 187 int status; 188 189 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 190 true, resp_msg, 191 HCLGEVF_TQPS_RSS_INFO_LEN); 192 if (status) { 193 dev_err(&hdev->pdev->dev, 194 "VF request to get tqp info from PF failed %d", 195 status); 196 return status; 197 } 198 199 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 200 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 201 memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); 202 memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); 203 204 return 0; 205 } 206 207 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 208 { 209 struct hclgevf_tqp *tqp; 210 int i; 211 212 /* if this is an ongoing reset then we need to re-allocate the TQPs 213 * since we cannot assume we would get the same number of TQPs back from PF 214 */ 215 if (hclgevf_dev_ongoing_reset(hdev)) 216 devm_kfree(&hdev->pdev->dev, hdev->htqp); 217 218 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 219 sizeof(struct hclgevf_tqp), GFP_KERNEL); 220 if (!hdev->htqp) 221 return -ENOMEM; 222 223 tqp = hdev->htqp; 224 225 for (i = 0; i < hdev->num_tqps; i++) { 226 tqp->dev = &hdev->pdev->dev; 227 tqp->index = i; 228 229 tqp->q.ae_algo = &ae_algovf; 230 tqp->q.buf_size = hdev->rx_buf_len; 231 tqp->q.desc_num = hdev->num_desc; 232 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 233 i * HCLGEVF_TQP_REG_SIZE; 234 235 tqp++; 236 } 237 238 return 0; 239 } 240 241 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 242 { 243 struct hnae3_handle *nic = &hdev->nic; 244 struct hnae3_knic_private_info *kinfo; 245 u16 new_tqps = hdev->num_tqps; 246 int i; 247 248 kinfo = &nic->kinfo; 249 kinfo->num_tc = 0; 250 kinfo->num_desc = hdev->num_desc; 251 kinfo->rx_buf_len = hdev->rx_buf_len; 252 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 253 if (hdev->hw_tc_map & BIT(i)) 254 kinfo->num_tc++; 255 256 kinfo->rss_size 257 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 258 new_tqps = kinfo->rss_size * kinfo->num_tc; 259 kinfo->num_tqps = min(new_tqps,
hdev->num_tqps); 260 261 /* if this is an ongoing reset then we need to re-allocate the hnae queues 262 * as well since the number of TQPs from PF might have changed. 263 */ 264 if (hclgevf_dev_ongoing_reset(hdev)) 265 devm_kfree(&hdev->pdev->dev, kinfo->tqp); 266 267 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 268 sizeof(struct hnae3_queue *), GFP_KERNEL); 269 if (!kinfo->tqp) 270 return -ENOMEM; 271 272 for (i = 0; i < kinfo->num_tqps; i++) { 273 hdev->htqp[i].q.handle = &hdev->nic; 274 hdev->htqp[i].q.tqp_index = i; 275 kinfo->tqp[i] = &hdev->htqp[i].q; 276 } 277 278 return 0; 279 } 280 281 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 282 { 283 int status; 284 u8 resp_msg; 285 286 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 287 0, false, &resp_msg, sizeof(u8)); 288 if (status) 289 dev_err(&hdev->pdev->dev, 290 "VF failed to fetch link status(%d) from PF", status); 291 } 292 293 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 294 { 295 struct hnae3_handle *handle = &hdev->nic; 296 struct hnae3_client *client; 297 298 client = handle->client; 299 300 link_state = 301 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 302 303 if (link_state != hdev->hw.mac.link) { 304 client->ops->link_status_change(handle, !!link_state); 305 hdev->hw.mac.link = link_state; 306 } 307 } 308 309 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 310 { 311 struct hnae3_handle *nic = &hdev->nic; 312 int ret; 313 314 nic->ae_algo = &ae_algovf; 315 nic->pdev = hdev->pdev; 316 nic->numa_node_mask = hdev->numa_node_mask; 317 nic->flags |= HNAE3_SUPPORT_VF; 318 319 if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { 320 dev_err(&hdev->pdev->dev, "unsupported device type %d\n", 321 hdev->ae_dev->dev_type); 322 return -EINVAL; 323 } 324 325 ret = hclgevf_knic_setup(hdev); 326 if (ret) 327 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 328 ret); 329 return ret; 330 } 331 332 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 333 { 334 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 335 dev_warn(&hdev->pdev->dev, 336 "vector(vector_id %d) has been freed.\n", vector_id); 337 return; 338 } 339 340 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 341 hdev->num_msi_left += 1; 342 hdev->num_msi_used -= 1; 343 } 344 345 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 346 struct hnae3_vector_info *vector_info) 347 { 348 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 349 struct hnae3_vector_info *vector = vector_info; 350 int alloc = 0; 351 int i, j; 352 353 vector_num = min(hdev->num_msi_left, vector_num); 354 355 for (j = 0; j < vector_num; j++) { 356 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 357 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 358 vector->vector = pci_irq_vector(hdev->pdev, i); 359 vector->io_addr = hdev->hw.io_base + 360 HCLGEVF_VECTOR_REG_BASE + 361 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 362 hdev->vector_status[i] = 0; 363 hdev->vector_irq[i] = vector->vector; 364 365 vector++; 366 alloc++; 367 368 break; 369 } 370 } 371 } 372 hdev->num_msi_left -= alloc; 373 hdev->num_msi_used += alloc; 374 375 return alloc; 376 } 377 378 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 379 { 380 int i; 381 382 for (i = 0; i < hdev->num_msi; i++) 383 if (vector == hdev->vector_irq[i]) 384 return i; 385 386 return -EINVAL; 387 } 388 389 static int hclgevf_set_rss_algo_key(struct
hclgevf_dev *hdev, 390 const u8 hfunc, const u8 *key) 391 { 392 struct hclgevf_rss_config_cmd *req; 393 struct hclgevf_desc desc; 394 int key_offset; 395 int key_size; 396 int ret; 397 398 req = (struct hclgevf_rss_config_cmd *)desc.data; 399 400 for (key_offset = 0; key_offset < 3; key_offset++) { 401 hclgevf_cmd_setup_basic_desc(&desc, 402 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 403 false); 404 405 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 406 req->hash_config |= 407 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 408 409 if (key_offset == 2) 410 key_size = 411 HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; 412 else 413 key_size = HCLGEVF_RSS_HASH_KEY_NUM; 414 415 memcpy(req->hash_key, 416 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 417 418 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 419 if (ret) { 420 dev_err(&hdev->pdev->dev, 421 "Configure RSS config fail, status = %d\n", 422 ret); 423 return ret; 424 } 425 } 426 427 return 0; 428 } 429 430 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 431 { 432 return HCLGEVF_RSS_KEY_SIZE; 433 } 434 435 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 436 { 437 return HCLGEVF_RSS_IND_TBL_SIZE; 438 } 439 440 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 441 { 442 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 443 struct hclgevf_rss_indirection_table_cmd *req; 444 struct hclgevf_desc desc; 445 int status; 446 int i, j; 447 448 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 449 450 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 451 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 452 false); 453 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 454 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 455 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 456 req->rss_result[j] = 457 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 458 459 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 460 if (status) { 461 dev_err(&hdev->pdev->dev, 462 "VF failed(=%d) to set RSS indirection table\n", 463 status); 464 return status; 465 } 466 } 467 468 return 0; 469 } 470 471 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 472 { 473 struct hclgevf_rss_tc_mode_cmd *req; 474 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 475 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 476 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 477 struct hclgevf_desc desc; 478 u16 roundup_size; 479 int status; 480 int i; 481 482 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 483 484 roundup_size = roundup_pow_of_two(rss_size); 485 roundup_size = ilog2(roundup_size); 486 487 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 488 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 489 tc_size[i] = roundup_size; 490 tc_offset[i] = rss_size * i; 491 } 492 493 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 494 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 495 hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 496 (tc_valid[i] & 0x1)); 497 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 498 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 499 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 500 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 501 } 502 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 503 if (status) 504 dev_err(&hdev->pdev->dev, 505 "VF failed(=%d) to set rss tc mode\n", status); 506 507 return status; 508 } 509 510 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 511 u8 *hfunc) 512 { 513 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 
514 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 515 int i; 516 517 if (handle->pdev->revision >= 0x21) { 518 /* Get hash algorithm */ 519 if (hfunc) { 520 switch (rss_cfg->hash_algo) { 521 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 522 *hfunc = ETH_RSS_HASH_TOP; 523 break; 524 case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 525 *hfunc = ETH_RSS_HASH_XOR; 526 break; 527 default: 528 *hfunc = ETH_RSS_HASH_UNKNOWN; 529 break; 530 } 531 } 532 533 /* Get the RSS Key required by the user */ 534 if (key) 535 memcpy(key, rss_cfg->rss_hash_key, 536 HCLGEVF_RSS_KEY_SIZE); 537 } 538 539 if (indir) 540 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 541 indir[i] = rss_cfg->rss_indirection_tbl[i]; 542 543 return 0; 544 } 545 546 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 547 const u8 *key, const u8 hfunc) 548 { 549 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 550 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 551 int ret, i; 552 553 if (handle->pdev->revision >= 0x21) { 554 /* Set the RSS Hash Key if specified by the user */ 555 if (key) { 556 switch (hfunc) { 557 case ETH_RSS_HASH_TOP: 558 rss_cfg->hash_algo = 559 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 560 break; 561 case ETH_RSS_HASH_XOR: 562 rss_cfg->hash_algo = 563 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 564 break; 565 case ETH_RSS_HASH_NO_CHANGE: 566 break; 567 default: 568 return -EINVAL; 569 } 570 571 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 572 key); 573 if (ret) 574 return ret; 575 576 /* Update the shadow RSS key with the user specified key */ 577 memcpy(rss_cfg->rss_hash_key, key, 578 HCLGEVF_RSS_KEY_SIZE); 579 } 580 } 581 582 /* update the shadow RSS table with user specified qids */ 583 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 584 rss_cfg->rss_indirection_tbl[i] = indir[i]; 585 586 /* update the hardware */ 587 return hclgevf_set_rss_indir_table(hdev); 588 } 589 590 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 591 { 592 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ?
HCLGEVF_S_PORT_BIT : 0; 593 594 if (nfc->data & RXH_L4_B_2_3) 595 hash_sets |= HCLGEVF_D_PORT_BIT; 596 else 597 hash_sets &= ~HCLGEVF_D_PORT_BIT; 598 599 if (nfc->data & RXH_IP_SRC) 600 hash_sets |= HCLGEVF_S_IP_BIT; 601 else 602 hash_sets &= ~HCLGEVF_S_IP_BIT; 603 604 if (nfc->data & RXH_IP_DST) 605 hash_sets |= HCLGEVF_D_IP_BIT; 606 else 607 hash_sets &= ~HCLGEVF_D_IP_BIT; 608 609 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 610 hash_sets |= HCLGEVF_V_TAG_BIT; 611 612 return hash_sets; 613 } 614 615 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 616 struct ethtool_rxnfc *nfc) 617 { 618 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 619 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 620 struct hclgevf_rss_input_tuple_cmd *req; 621 struct hclgevf_desc desc; 622 u8 tuple_sets; 623 int ret; 624 625 if (handle->pdev->revision == 0x20) 626 return -EOPNOTSUPP; 627 628 if (nfc->data & 629 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 630 return -EINVAL; 631 632 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 633 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 634 635 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 636 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 637 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 638 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 639 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 640 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 641 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 642 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 643 644 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 645 switch (nfc->flow_type) { 646 case TCP_V4_FLOW: 647 req->ipv4_tcp_en = tuple_sets; 648 break; 649 case TCP_V6_FLOW: 650 req->ipv6_tcp_en = tuple_sets; 651 break; 652 case UDP_V4_FLOW: 653 req->ipv4_udp_en = tuple_sets; 654 break; 655 case UDP_V6_FLOW: 656 req->ipv6_udp_en = tuple_sets; 657 break; 658 case SCTP_V4_FLOW: 659 req->ipv4_sctp_en = tuple_sets; 660 break; 661 case SCTP_V6_FLOW: 662 if ((nfc->data & RXH_L4_B_0_1) || 663 (nfc->data & RXH_L4_B_2_3)) 664 return -EINVAL; 665 666 req->ipv6_sctp_en = tuple_sets; 667 break; 668 case IPV4_FLOW: 669 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 670 break; 671 case IPV6_FLOW: 672 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 673 break; 674 default: 675 return -EINVAL; 676 } 677 678 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 679 if (ret) { 680 dev_err(&hdev->pdev->dev, 681 "Set rss tuple fail, status = %d\n", ret); 682 return ret; 683 } 684 685 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 686 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 687 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 688 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 689 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 690 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 691 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 692 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 693 return 0; 694 } 695 696 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 697 struct ethtool_rxnfc *nfc) 698 { 699 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 700 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 701 u8 tuple_sets; 702 703 if (handle->pdev->revision == 0x20) 704 return -EOPNOTSUPP; 705 706 nfc->data = 0; 707 708 switch (nfc->flow_type) { 709 case 
TCP_V4_FLOW: 710 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 711 break; 712 case UDP_V4_FLOW: 713 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 714 break; 715 case TCP_V6_FLOW: 716 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 717 break; 718 case UDP_V6_FLOW: 719 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 720 break; 721 case SCTP_V4_FLOW: 722 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 723 break; 724 case SCTP_V6_FLOW: 725 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 726 break; 727 case IPV4_FLOW: 728 case IPV6_FLOW: 729 tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 730 break; 731 default: 732 return -EINVAL; 733 } 734 735 if (!tuple_sets) 736 return 0; 737 738 if (tuple_sets & HCLGEVF_D_PORT_BIT) 739 nfc->data |= RXH_L4_B_2_3; 740 if (tuple_sets & HCLGEVF_S_PORT_BIT) 741 nfc->data |= RXH_L4_B_0_1; 742 if (tuple_sets & HCLGEVF_D_IP_BIT) 743 nfc->data |= RXH_IP_DST; 744 if (tuple_sets & HCLGEVF_S_IP_BIT) 745 nfc->data |= RXH_IP_SRC; 746 747 return 0; 748 } 749 750 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 751 struct hclgevf_rss_cfg *rss_cfg) 752 { 753 struct hclgevf_rss_input_tuple_cmd *req; 754 struct hclgevf_desc desc; 755 int ret; 756 757 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 758 759 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 760 761 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 762 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 763 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 764 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 765 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 766 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 767 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 768 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 769 770 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 771 if (ret) 772 dev_err(&hdev->pdev->dev, 773 "Configure rss input fail, status = %d\n", ret); 774 return ret; 775 } 776 777 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 778 { 779 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 780 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 781 782 return rss_cfg->rss_size; 783 } 784 785 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 786 int vector_id, 787 struct hnae3_ring_chain_node *ring_chain) 788 { 789 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 790 struct hnae3_ring_chain_node *node; 791 struct hclge_mbx_vf_to_pf_cmd *req; 792 struct hclgevf_desc desc; 793 int i = 0; 794 int status; 795 u8 type; 796 797 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 798 799 for (node = ring_chain; node; node = node->next) { 800 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 801 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 802 803 if (i == 0) { 804 hclgevf_cmd_setup_basic_desc(&desc, 805 HCLGEVF_OPC_MBX_VF_TO_PF, 806 false); 807 type = en ? 
808 HCLGE_MBX_MAP_RING_TO_VECTOR : 809 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 810 req->msg[0] = type; 811 req->msg[1] = vector_id; 812 } 813 814 req->msg[idx_offset] = 815 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 816 req->msg[idx_offset + 1] = node->tqp_index; 817 req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, 818 HNAE3_RING_GL_IDX_M, 819 HNAE3_RING_GL_IDX_S); 820 821 i++; 822 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 823 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 824 HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 825 !node->next) { 826 req->msg[2] = i; 827 828 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 829 if (status) { 830 dev_err(&hdev->pdev->dev, 831 "Map TQP fail, status is %d.\n", 832 status); 833 return status; 834 } 835 i = 0; 836 hclgevf_cmd_setup_basic_desc(&desc, 837 HCLGEVF_OPC_MBX_VF_TO_PF, 838 false); 839 req->msg[0] = type; 840 req->msg[1] = vector_id; 841 } 842 } 843 844 return 0; 845 } 846 847 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 848 struct hnae3_ring_chain_node *ring_chain) 849 { 850 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 851 int vector_id; 852 853 vector_id = hclgevf_get_vector_index(hdev, vector); 854 if (vector_id < 0) { 855 dev_err(&handle->pdev->dev, 856 "Get vector index fail. ret =%d\n", vector_id); 857 return vector_id; 858 } 859 860 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 861 } 862 863 static int hclgevf_unmap_ring_from_vector( 864 struct hnae3_handle *handle, 865 int vector, 866 struct hnae3_ring_chain_node *ring_chain) 867 { 868 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 869 int ret, vector_id; 870 871 vector_id = hclgevf_get_vector_index(hdev, vector); 872 if (vector_id < 0) { 873 dev_err(&handle->pdev->dev, 874 "Get vector index fail. ret =%d\n", vector_id); 875 return vector_id; 876 } 877 878 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 879 if (ret) 880 dev_err(&handle->pdev->dev, 881 "Unmap ring from vector fail. vector=%d, ret =%d\n", 882 vector_id, 883 ret); 884 885 return ret; 886 } 887 888 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 889 { 890 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 891 int vector_id; 892 893 vector_id = hclgevf_get_vector_index(hdev, vector); 894 if (vector_id < 0) { 895 dev_err(&handle->pdev->dev, 896 "hclgevf_put_vector get vector index fail. ret =%d\n", 897 vector_id); 898 return vector_id; 899 } 900 901 hclgevf_free_vector(hdev, vector_id); 902 903 return 0; 904 } 905 906 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 907 bool en_uc_pmc, bool en_mc_pmc) 908 { 909 struct hclge_mbx_vf_to_pf_cmd *req; 910 struct hclgevf_desc desc; 911 int status; 912 913 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 914 915 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); 916 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; 917 req->msg[1] = en_uc_pmc ? 1 : 0; 918 req->msg[2] = en_mc_pmc ? 
1 : 0; 919 920 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 921 if (status) 922 dev_err(&hdev->pdev->dev, 923 "Set promisc mode fail, status is %d.\n", status); 924 925 return status; 926 } 927 928 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, 929 bool en_uc_pmc, bool en_mc_pmc) 930 { 931 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 932 933 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); 934 } 935 936 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, 937 int stream_id, bool enable) 938 { 939 struct hclgevf_cfg_com_tqp_queue_cmd *req; 940 struct hclgevf_desc desc; 941 int status; 942 943 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 944 945 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 946 false); 947 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 948 req->stream_id = cpu_to_le16(stream_id); 949 req->enable |= enable << HCLGEVF_TQP_ENABLE_B; 950 951 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 952 if (status) 953 dev_err(&hdev->pdev->dev, 954 "TQP enable fail, status =%d.\n", status); 955 956 return status; 957 } 958 959 static int hclgevf_get_queue_id(struct hnae3_queue *queue) 960 { 961 struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q); 962 963 return tqp->index; 964 } 965 966 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 967 { 968 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 969 struct hclgevf_tqp *tqp; 970 int i; 971 972 for (i = 0; i < kinfo->num_tqps; i++) { 973 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 974 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 975 } 976 } 977 978 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 979 { 980 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 981 982 ether_addr_copy(p, hdev->hw.mac.mac_addr); 983 } 984 985 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 986 bool is_first) 987 { 988 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 989 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 990 u8 *new_mac_addr = (u8 *)p; 991 u8 msg_data[ETH_ALEN * 2]; 992 u16 subcode; 993 int status; 994 995 ether_addr_copy(msg_data, new_mac_addr); 996 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); 997 998 subcode = is_first ? 
HCLGE_MBX_MAC_VLAN_UC_ADD : 999 HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1000 1001 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1002 subcode, msg_data, ETH_ALEN * 2, 1003 true, NULL, 0); 1004 if (!status) 1005 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1006 1007 return status; 1008 } 1009 1010 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1011 const unsigned char *addr) 1012 { 1013 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1014 1015 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1016 HCLGE_MBX_MAC_VLAN_UC_ADD, 1017 addr, ETH_ALEN, false, NULL, 0); 1018 } 1019 1020 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1021 const unsigned char *addr) 1022 { 1023 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1024 1025 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1026 HCLGE_MBX_MAC_VLAN_UC_REMOVE, 1027 addr, ETH_ALEN, false, NULL, 0); 1028 } 1029 1030 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1031 const unsigned char *addr) 1032 { 1033 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1034 1035 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1036 HCLGE_MBX_MAC_VLAN_MC_ADD, 1037 addr, ETH_ALEN, false, NULL, 0); 1038 } 1039 1040 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1041 const unsigned char *addr) 1042 { 1043 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1044 1045 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1046 HCLGE_MBX_MAC_VLAN_MC_REMOVE, 1047 addr, ETH_ALEN, false, NULL, 0); 1048 } 1049 1050 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1051 __be16 proto, u16 vlan_id, 1052 bool is_kill) 1053 { 1054 #define HCLGEVF_VLAN_MBX_MSG_LEN 5 1055 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1056 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; 1057 1058 if (vlan_id > 4095) 1059 return -EINVAL; 1060 1061 if (proto != htons(ETH_P_8021Q)) 1062 return -EPROTONOSUPPORT; 1063 1064 msg_data[0] = is_kill; 1065 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 1066 memcpy(&msg_data[3], &proto, sizeof(proto)); 1067 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1068 HCLGE_MBX_VLAN_FILTER, msg_data, 1069 HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); 1070 } 1071 1072 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1073 { 1074 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1075 u8 msg_data; 1076 1077 msg_data = enable ? 
1 : 0; 1078 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1079 HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, 1080 1, false, NULL, 0); 1081 } 1082 1083 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1084 { 1085 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1086 u8 msg_data[2]; 1087 int ret; 1088 1089 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 1090 1091 /* disable the vf queue before sending the queue reset msg to PF */ 1092 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 1093 if (ret) 1094 return ret; 1095 1096 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 1097 2, true, NULL, 0); 1098 } 1099 1100 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1101 enum hnae3_reset_notify_type type) 1102 { 1103 struct hnae3_client *client = hdev->nic_client; 1104 struct hnae3_handle *handle = &hdev->nic; 1105 1106 if (!client->ops->reset_notify) 1107 return -EOPNOTSUPP; 1108 1109 return client->ops->reset_notify(handle, type); 1110 } 1111 1112 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1113 { 1114 #define HCLGEVF_RESET_WAIT_MS 500 1115 #define HCLGEVF_RESET_WAIT_CNT 20 1116 u32 val, cnt = 0; 1117 1118 /* wait to check the hardware reset completion status */ 1119 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); 1120 while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && 1121 (cnt < HCLGEVF_RESET_WAIT_CNT)) { 1122 msleep(HCLGEVF_RESET_WAIT_MS); 1123 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); 1124 cnt++; 1125 } 1126 1127 /* hardware completion status should be available by this time */ 1128 if (cnt >= HCLGEVF_RESET_WAIT_CNT) { 1129 dev_warn(&hdev->pdev->dev, 1130 "couldn't get reset done status from h/w, timeout!\n"); 1131 return -EBUSY; 1132 } 1133 1134 /* we will wait a bit more to let the reset of the stack complete. This 1135 * might happen in case the reset assertion was made by the PF. This also 1136 * means we might end up waiting a bit longer even for a VF reset.
1137 */ 1138 msleep(5000); 1139 1140 return 0; 1141 } 1142 1143 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1144 { 1145 int ret; 1146 1147 /* uninitialize the nic client */ 1148 hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1149 1150 /* re-initialize the hclge device */ 1151 ret = hclgevf_init_hdev(hdev); 1152 if (ret) { 1153 dev_err(&hdev->pdev->dev, 1154 "hclge device re-init failed, VF is disabled!\n"); 1155 return ret; 1156 } 1157 1158 /* bring up the nic client again */ 1159 hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1160 1161 return 0; 1162 } 1163 1164 static int hclgevf_reset(struct hclgevf_dev *hdev) 1165 { 1166 int ret; 1167 1168 rtnl_lock(); 1169 1170 /* bring down the nic to stop any ongoing TX/RX */ 1171 hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1172 1173 rtnl_unlock(); 1174 1175 /* check if VF could successfully fetch the hardware reset completion 1176 * status from the hardware 1177 */ 1178 ret = hclgevf_reset_wait(hdev); 1179 if (ret) { 1180 /* can't do much in this situation, will disable VF */ 1181 dev_err(&hdev->pdev->dev, 1182 "VF failed(=%d) to fetch H/W reset completion status\n", 1183 ret); 1184 1185 dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); 1186 rtnl_lock(); 1187 hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1188 1189 rtnl_unlock(); 1190 return ret; 1191 } 1192 1193 rtnl_lock(); 1194 1195 /* now, re-initialize the nic client and ae device*/ 1196 ret = hclgevf_reset_stack(hdev); 1197 if (ret) 1198 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1199 1200 /* bring up the nic to enable TX/RX again */ 1201 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1202 1203 rtnl_unlock(); 1204 1205 return ret; 1206 } 1207 1208 static int hclgevf_do_reset(struct hclgevf_dev *hdev) 1209 { 1210 int status; 1211 u8 respmsg; 1212 1213 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 1214 0, false, &respmsg, sizeof(u8)); 1215 if (status) 1216 dev_err(&hdev->pdev->dev, 1217 "VF reset request to PF failed(=%d)\n", status); 1218 1219 return status; 1220 } 1221 1222 static void hclgevf_reset_event(struct pci_dev *pdev, 1223 struct hnae3_handle *handle) 1224 { 1225 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1226 1227 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 1228 1229 handle->reset_level = HNAE3_VF_RESET; 1230 1231 /* reset of this VF requested */ 1232 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1233 hclgevf_reset_task_schedule(hdev); 1234 1235 handle->last_reset_time = jiffies; 1236 } 1237 1238 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1239 { 1240 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1241 1242 return hdev->fw_version; 1243 } 1244 1245 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1246 { 1247 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1248 1249 vector->vector_irq = pci_irq_vector(hdev->pdev, 1250 HCLGEVF_MISC_VECTOR_NUM); 1251 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1252 /* vector status always valid for Vector 0 */ 1253 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1254 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1255 1256 hdev->num_msi_left -= 1; 1257 hdev->num_msi_used += 1; 1258 } 1259 1260 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1261 { 1262 if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && 1263 !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { 1264 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1265 
schedule_work(&hdev->rst_service_task); 1266 } 1267 } 1268 1269 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1270 { 1271 if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && 1272 !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { 1273 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1274 schedule_work(&hdev->mbx_service_task); 1275 } 1276 } 1277 1278 static void hclgevf_task_schedule(struct hclgevf_dev *hdev) 1279 { 1280 if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && 1281 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) 1282 schedule_work(&hdev->service_task); 1283 } 1284 1285 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) 1286 { 1287 /* if we have any pending mailbox event then schedule the mbx task */ 1288 if (hdev->mbx_event_pending) 1289 hclgevf_mbx_task_schedule(hdev); 1290 1291 if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) 1292 hclgevf_reset_task_schedule(hdev); 1293 } 1294 1295 static void hclgevf_service_timer(struct timer_list *t) 1296 { 1297 struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); 1298 1299 mod_timer(&hdev->service_timer, jiffies + 5 * HZ); 1300 1301 hclgevf_task_schedule(hdev); 1302 } 1303 1304 static void hclgevf_reset_service_task(struct work_struct *work) 1305 { 1306 struct hclgevf_dev *hdev = 1307 container_of(work, struct hclgevf_dev, rst_service_task); 1308 int ret; 1309 1310 if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1311 return; 1312 1313 clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1314 1315 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 1316 &hdev->reset_state)) { 1317 /* PF has intimated that it is about to reset the hardware. 1318 * We now have to poll & check if hardware has actually completed 1319 * the reset sequence. On hardware reset completion, VF needs to 1320 * reset the client and ae device. 1321 */ 1322 hdev->reset_attempts = 0; 1323 1324 ret = hclgevf_reset(hdev); 1325 if (ret) 1326 dev_err(&hdev->pdev->dev, "VF stack reset failed.\n"); 1327 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1328 &hdev->reset_state)) { 1329 /* we could be here when either of the below happens: 1330 * 1. reset was initiated due to a watchdog timeout caused by 1331 * a. IMP was earlier reset and our TX got choked down, 1332 * which resulted in the watchdog reacting and inducing a VF 1333 * reset. This also means our cmdq would be unreliable. 1334 * b. a problem in TX due to another lower layer (for example, the 1335 * link layer not functioning properly, etc.) 1336 * 2. VF reset might have been initiated due to some config 1337 * change. 1338 * 1339 * NOTE: There is no clearer way to detect the above cases than to react 1340 * to the response of the PF for this reset request. The PF will ack 1341 * cases 1b and 2 but we will not get any intimation about 1a 1342 * from the PF as the cmdq would be in an unreliable state, i.e. mailbox 1343 * communication between PF and VF would be broken. 1344 */ 1345 1346 /* if we never get into the pending state it means either: 1347 * 1. PF is not receiving our request which could be due to IMP 1348 * reset 1349 * 2. PF itself is in a bad state 1350 * We cannot do much for 2. but to check first we can try resetting 1351 * our PCIe + stack and see if it alleviates the problem.
1352 */ 1353 if (hdev->reset_attempts > 3) { 1354 /* prepare for full reset of stack + pcie interface */ 1355 hdev->nic.reset_level = HNAE3_VF_FULL_RESET; 1356 1357 /* "defer" schedule the reset task again */ 1358 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1359 } else { 1360 hdev->reset_attempts++; 1361 1362 /* request PF for resetting this VF via mailbox */ 1363 ret = hclgevf_do_reset(hdev); 1364 if (ret) 1365 dev_warn(&hdev->pdev->dev, 1366 "VF rst fail, stack will call\n"); 1367 } 1368 } 1369 1370 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1371 } 1372 1373 static void hclgevf_mailbox_service_task(struct work_struct *work) 1374 { 1375 struct hclgevf_dev *hdev; 1376 1377 hdev = container_of(work, struct hclgevf_dev, mbx_service_task); 1378 1379 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1380 return; 1381 1382 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1383 1384 hclgevf_mbx_async_handler(hdev); 1385 1386 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1387 } 1388 1389 static void hclgevf_service_task(struct work_struct *work) 1390 { 1391 struct hclgevf_dev *hdev; 1392 1393 hdev = container_of(work, struct hclgevf_dev, service_task); 1394 1395 /* request the link status from the PF. PF would be able to tell VF 1396 * about such updates in future so we might remove this later 1397 */ 1398 hclgevf_request_link_info(hdev); 1399 1400 hclgevf_deferred_task_schedule(hdev); 1401 1402 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1403 } 1404 1405 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 1406 { 1407 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 1408 } 1409 1410 static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval) 1411 { 1412 u32 cmdq_src_reg; 1413 1414 /* fetch the events from their corresponding regs */ 1415 cmdq_src_reg = hclgevf_read_dev(&hdev->hw, 1416 HCLGEVF_VECTOR0_CMDQ_SRC_REG); 1417 1418 /* check for vector0 mailbox(=CMDQ RX) event source */ 1419 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 1420 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 1421 *clearval = cmdq_src_reg; 1422 return true; 1423 } 1424 1425 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); 1426 1427 return false; 1428 } 1429 1430 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1431 { 1432 writel(en ? 
1 : 0, vector->addr); 1433 } 1434 1435 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 1436 { 1437 struct hclgevf_dev *hdev = data; 1438 u32 clearval; 1439 1440 hclgevf_enable_vector(&hdev->misc_vector, false); 1441 if (!hclgevf_check_event_cause(hdev, &clearval)) 1442 goto skip_sched; 1443 1444 hclgevf_mbx_handler(hdev); 1445 1446 hclgevf_clear_event_cause(hdev, clearval); 1447 1448 skip_sched: 1449 hclgevf_enable_vector(&hdev->misc_vector, true); 1450 1451 return IRQ_HANDLED; 1452 } 1453 1454 static int hclgevf_configure(struct hclgevf_dev *hdev) 1455 { 1456 int ret; 1457 1458 hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE; 1459 1460 /* get queue configuration from PF */ 1461 ret = hclgevf_get_queue_info(hdev); 1462 if (ret) 1463 return ret; 1464 /* get tc configuration from PF */ 1465 return hclgevf_get_tc_info(hdev); 1466 } 1467 1468 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 1469 { 1470 struct pci_dev *pdev = ae_dev->pdev; 1471 struct hclgevf_dev *hdev = ae_dev->priv; 1472 1473 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 1474 if (!hdev) 1475 return -ENOMEM; 1476 1477 hdev->pdev = pdev; 1478 hdev->ae_dev = ae_dev; 1479 ae_dev->priv = hdev; 1480 1481 return 0; 1482 } 1483 1484 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 1485 { 1486 struct hnae3_handle *roce = &hdev->roce; 1487 struct hnae3_handle *nic = &hdev->nic; 1488 1489 roce->rinfo.num_vectors = hdev->num_roce_msix; 1490 1491 if (hdev->num_msi_left < roce->rinfo.num_vectors || 1492 hdev->num_msi_left == 0) 1493 return -EINVAL; 1494 1495 roce->rinfo.base_vector = hdev->roce_base_vector; 1496 1497 roce->rinfo.netdev = nic->kinfo.netdev; 1498 roce->rinfo.roce_io_base = hdev->hw.io_base; 1499 1500 roce->pdev = nic->pdev; 1501 roce->ae_algo = nic->ae_algo; 1502 roce->numa_node_mask = nic->numa_node_mask; 1503 1504 return 0; 1505 } 1506 1507 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 1508 { 1509 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1510 int i, ret; 1511 1512 rss_cfg->rss_size = hdev->rss_size_max; 1513 1514 if (hdev->pdev->revision >= 0x21) { 1515 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 1516 netdev_rss_key_fill(rss_cfg->rss_hash_key, 1517 HCLGEVF_RSS_KEY_SIZE); 1518 1519 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 1520 rss_cfg->rss_hash_key); 1521 if (ret) 1522 return ret; 1523 1524 rss_cfg->rss_tuple_sets.ipv4_tcp_en = 1525 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1526 rss_cfg->rss_tuple_sets.ipv4_udp_en = 1527 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1528 rss_cfg->rss_tuple_sets.ipv4_sctp_en = 1529 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1530 rss_cfg->rss_tuple_sets.ipv4_fragment_en = 1531 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1532 rss_cfg->rss_tuple_sets.ipv6_tcp_en = 1533 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1534 rss_cfg->rss_tuple_sets.ipv6_udp_en = 1535 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1536 rss_cfg->rss_tuple_sets.ipv6_sctp_en = 1537 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1538 rss_cfg->rss_tuple_sets.ipv6_fragment_en = 1539 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1540 1541 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 1542 if (ret) 1543 return ret; 1544 1545 } 1546 1547 /* Initialize RSS indirect table for each vport */ 1548 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 1549 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; 1550 1551 ret = hclgevf_set_rss_indir_table(hdev); 1552 if (ret) 1553 return ret; 1554 1555 return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); 1556 } 1557 1558 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 1559 { 1560 
/* other vlan config(like, VLAN TX/RX offload) would also be added 1561 * here later 1562 */ 1563 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 1564 false); 1565 } 1566 1567 static int hclgevf_ae_start(struct hnae3_handle *handle) 1568 { 1569 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1570 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1571 int i, queue_id; 1572 1573 for (i = 0; i < kinfo->num_tqps; i++) { 1574 /* ring enable */ 1575 queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); 1576 if (queue_id < 0) { 1577 dev_warn(&hdev->pdev->dev, 1578 "Get invalid queue id, ignore it\n"); 1579 continue; 1580 } 1581 1582 hclgevf_tqp_enable(hdev, queue_id, 0, true); 1583 } 1584 1585 /* reset tqp stats */ 1586 hclgevf_reset_tqp_stats(handle); 1587 1588 hclgevf_request_link_info(hdev); 1589 1590 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1591 mod_timer(&hdev->service_timer, jiffies + HZ); 1592 1593 return 0; 1594 } 1595 1596 static void hclgevf_ae_stop(struct hnae3_handle *handle) 1597 { 1598 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1599 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1600 int i, queue_id; 1601 1602 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1603 1604 for (i = 0; i < kinfo->num_tqps; i++) { 1605 /* Ring disable */ 1606 queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); 1607 if (queue_id < 0) { 1608 dev_warn(&hdev->pdev->dev, 1609 "Get invalid queue id, ignore it\n"); 1610 continue; 1611 } 1612 1613 hclgevf_tqp_enable(hdev, queue_id, 0, false); 1614 } 1615 1616 /* reset tqp stats */ 1617 hclgevf_reset_tqp_stats(handle); 1618 del_timer_sync(&hdev->service_timer); 1619 cancel_work_sync(&hdev->service_task); 1620 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1621 hclgevf_update_link_status(hdev, 0); 1622 } 1623 1624 static void hclgevf_state_init(struct hclgevf_dev *hdev) 1625 { 1626 /* if this is on going reset then skip this initialization */ 1627 if (hclgevf_dev_ongoing_reset(hdev)) 1628 return; 1629 1630 /* setup tasks for the MBX */ 1631 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); 1632 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1633 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1634 1635 /* setup tasks for service timer */ 1636 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); 1637 1638 INIT_WORK(&hdev->service_task, hclgevf_service_task); 1639 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1640 1641 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); 1642 1643 mutex_init(&hdev->mbx_resp.mbx_mutex); 1644 1645 /* bring the device down */ 1646 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1647 } 1648 1649 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 1650 { 1651 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1652 1653 if (hdev->service_timer.function) 1654 del_timer_sync(&hdev->service_timer); 1655 if (hdev->service_task.func) 1656 cancel_work_sync(&hdev->service_task); 1657 if (hdev->mbx_service_task.func) 1658 cancel_work_sync(&hdev->mbx_service_task); 1659 if (hdev->rst_service_task.func) 1660 cancel_work_sync(&hdev->rst_service_task); 1661 1662 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 1663 } 1664 1665 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 1666 { 1667 struct pci_dev *pdev = hdev->pdev; 1668 int vectors; 1669 int i; 1670 1671 /* if this is on going reset then skip this initialization */ 1672 if (hclgevf_dev_ongoing_reset(hdev)) 1673 return 0; 1674 1675 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) 
1676 vectors = pci_alloc_irq_vectors(pdev, 1677 hdev->roce_base_msix_offset + 1, 1678 hdev->num_msi, 1679 PCI_IRQ_MSIX); 1680 else 1681 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 1682 PCI_IRQ_MSI | PCI_IRQ_MSIX); 1683 1684 if (vectors < 0) { 1685 dev_err(&pdev->dev, 1686 "failed(%d) to allocate MSI/MSI-X vectors\n", 1687 vectors); 1688 return vectors; 1689 } 1690 if (vectors < hdev->num_msi) 1691 dev_warn(&hdev->pdev->dev, 1692 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 1693 hdev->num_msi, vectors); 1694 1695 hdev->num_msi = vectors; 1696 hdev->num_msi_left = vectors; 1697 hdev->base_msi_vector = pdev->irq; 1698 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 1699 1700 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 1701 sizeof(u16), GFP_KERNEL); 1702 if (!hdev->vector_status) { 1703 pci_free_irq_vectors(pdev); 1704 return -ENOMEM; 1705 } 1706 1707 for (i = 0; i < hdev->num_msi; i++) 1708 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 1709 1710 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 1711 sizeof(int), GFP_KERNEL); 1712 if (!hdev->vector_irq) { 1713 pci_free_irq_vectors(pdev); 1714 return -ENOMEM; 1715 } 1716 1717 return 0; 1718 } 1719 1720 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 1721 { 1722 struct pci_dev *pdev = hdev->pdev; 1723 1724 pci_free_irq_vectors(pdev); 1725 } 1726 1727 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 1728 { 1729 int ret = 0; 1730 1731 /* if this is on going reset then skip this initialization */ 1732 if (hclgevf_dev_ongoing_reset(hdev)) 1733 return 0; 1734 1735 hclgevf_get_misc_vector(hdev); 1736 1737 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 1738 0, "hclgevf_cmd", hdev); 1739 if (ret) { 1740 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 1741 hdev->misc_vector.vector_irq); 1742 return ret; 1743 } 1744 1745 hclgevf_clear_event_cause(hdev, 0); 1746 1747 /* enable misc. 
vector(vector 0) */ 1748 hclgevf_enable_vector(&hdev->misc_vector, true); 1749 1750 return ret; 1751 } 1752 1753 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 1754 { 1755 /* disable misc vector(vector 0) */ 1756 hclgevf_enable_vector(&hdev->misc_vector, false); 1757 synchronize_irq(hdev->misc_vector.vector_irq); 1758 free_irq(hdev->misc_vector.vector_irq, hdev); 1759 hclgevf_free_vector(hdev, 0); 1760 } 1761 1762 static int hclgevf_init_client_instance(struct hnae3_client *client, 1763 struct hnae3_ae_dev *ae_dev) 1764 { 1765 struct hclgevf_dev *hdev = ae_dev->priv; 1766 int ret; 1767 1768 switch (client->type) { 1769 case HNAE3_CLIENT_KNIC: 1770 hdev->nic_client = client; 1771 hdev->nic.client = client; 1772 1773 ret = client->ops->init_instance(&hdev->nic); 1774 if (ret) 1775 goto clear_nic; 1776 1777 hnae3_set_client_init_flag(client, ae_dev, 1); 1778 1779 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { 1780 struct hnae3_client *rc = hdev->roce_client; 1781 1782 ret = hclgevf_init_roce_base_info(hdev); 1783 if (ret) 1784 goto clear_roce; 1785 ret = rc->ops->init_instance(&hdev->roce); 1786 if (ret) 1787 goto clear_roce; 1788 1789 hnae3_set_client_init_flag(hdev->roce_client, ae_dev, 1790 1); 1791 } 1792 break; 1793 case HNAE3_CLIENT_UNIC: 1794 hdev->nic_client = client; 1795 hdev->nic.client = client; 1796 1797 ret = client->ops->init_instance(&hdev->nic); 1798 if (ret) 1799 goto clear_nic; 1800 1801 hnae3_set_client_init_flag(client, ae_dev, 1); 1802 break; 1803 case HNAE3_CLIENT_ROCE: 1804 if (hnae3_dev_roce_supported(hdev)) { 1805 hdev->roce_client = client; 1806 hdev->roce.client = client; 1807 } 1808 1809 if (hdev->roce_client && hdev->nic_client) { 1810 ret = hclgevf_init_roce_base_info(hdev); 1811 if (ret) 1812 goto clear_roce; 1813 1814 ret = client->ops->init_instance(&hdev->roce); 1815 if (ret) 1816 goto clear_roce; 1817 } 1818 1819 hnae3_set_client_init_flag(client, ae_dev, 1); 1820 break; 1821 default: 1822 return -EINVAL; 1823 } 1824 1825 return 0; 1826 1827 clear_nic: 1828 hdev->nic_client = NULL; 1829 hdev->nic.client = NULL; 1830 return ret; 1831 clear_roce: 1832 hdev->roce_client = NULL; 1833 hdev->roce.client = NULL; 1834 return ret; 1835 } 1836 1837 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 1838 struct hnae3_ae_dev *ae_dev) 1839 { 1840 struct hclgevf_dev *hdev = ae_dev->priv; 1841 1842 /* un-init roce, if it exists */ 1843 if (hdev->roce_client) { 1844 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 1845 hdev->roce_client = NULL; 1846 hdev->roce.client = NULL; 1847 } 1848 1849 /* un-init nic/unic, if this was not called by roce client */ 1850 if (client->ops->uninit_instance && hdev->nic_client && 1851 client->type != HNAE3_CLIENT_ROCE) { 1852 client->ops->uninit_instance(&hdev->nic, 0); 1853 hdev->nic_client = NULL; 1854 hdev->nic.client = NULL; 1855 } 1856 } 1857 1858 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 1859 { 1860 struct pci_dev *pdev = hdev->pdev; 1861 struct hclgevf_hw *hw; 1862 int ret; 1863 1864 /* check if we need to skip initialization of pci. This will happen if 1865 * device is undergoing VF reset. Otherwise, we would need to 1866 * re-initialize pci interface again i.e. when device is not going 1867 * through *any* reset or actually undergoing full reset. 
1868 */ 1869 if (hclgevf_dev_ongoing_reset(hdev)) 1870 return 0; 1871 1872 ret = pci_enable_device(pdev); 1873 if (ret) { 1874 dev_err(&pdev->dev, "failed to enable PCI device\n"); 1875 return ret; 1876 } 1877 1878 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 1879 if (ret) { 1880 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 1881 goto err_disable_device; 1882 } 1883 1884 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 1885 if (ret) { 1886 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 1887 goto err_disable_device; 1888 } 1889 1890 pci_set_master(pdev); 1891 hw = &hdev->hw; 1892 hw->hdev = hdev; 1893 hw->io_base = pci_iomap(pdev, 2, 0); 1894 if (!hw->io_base) { 1895 dev_err(&pdev->dev, "can't map configuration register space\n"); 1896 ret = -ENOMEM; 1897 goto err_clr_master; 1898 } 1899 1900 return 0; 1901 1902 err_clr_master: 1903 pci_clear_master(pdev); 1904 pci_release_regions(pdev); 1905 err_disable_device: 1906 pci_disable_device(pdev); 1907 1908 return ret; 1909 } 1910 1911 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 1912 { 1913 struct pci_dev *pdev = hdev->pdev; 1914 1915 pci_iounmap(pdev, hdev->hw.io_base); 1916 pci_clear_master(pdev); 1917 pci_release_regions(pdev); 1918 pci_disable_device(pdev); 1919 } 1920 1921 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 1922 { 1923 struct hclgevf_query_res_cmd *req; 1924 struct hclgevf_desc desc; 1925 int ret; 1926 1927 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 1928 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1929 if (ret) { 1930 dev_err(&hdev->pdev->dev, 1931 "query vf resource failed, ret = %d.\n", ret); 1932 return ret; 1933 } 1934 1935 req = (struct hclgevf_query_res_cmd *)desc.data; 1936 1937 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { 1938 hdev->roce_base_msix_offset = 1939 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), 1940 HCLGEVF_MSIX_OFT_ROCEE_M, 1941 HCLGEVF_MSIX_OFT_ROCEE_S); 1942 hdev->num_roce_msix = 1943 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 1944 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 1945 1946 /* VF should have NIC vectors and Roce vectors, NIC vectors 1947 * are queued before Roce vectors. The offset is fixed to 64. 1948 */ 1949 hdev->num_msi = hdev->num_roce_msix + 1950 hdev->roce_base_msix_offset; 1951 } else { 1952 hdev->num_msi = 1953 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 1954 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 1955 } 1956 1957 return 0; 1958 } 1959 1960 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 1961 { 1962 struct pci_dev *pdev = hdev->pdev; 1963 int ret; 1964 1965 /* check if device is on-going full reset(i.e. 
pcie as well) */ 1966 if (hclgevf_dev_ongoing_full_reset(hdev)) { 1967 dev_warn(&pdev->dev, "device is going full reset\n"); 1968 hclgevf_uninit_hdev(hdev); 1969 } 1970 1971 ret = hclgevf_pci_init(hdev); 1972 if (ret) { 1973 dev_err(&pdev->dev, "PCI initialization failed\n"); 1974 return ret; 1975 } 1976 1977 ret = hclgevf_cmd_init(hdev); 1978 if (ret) 1979 goto err_cmd_init; 1980 1981 /* Get vf resource */ 1982 ret = hclgevf_query_vf_resource(hdev); 1983 if (ret) { 1984 dev_err(&hdev->pdev->dev, 1985 "Query vf status error, ret = %d.\n", ret); 1986 goto err_query_vf; 1987 } 1988 1989 ret = hclgevf_init_msi(hdev); 1990 if (ret) { 1991 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 1992 goto err_query_vf; 1993 } 1994 1995 hclgevf_state_init(hdev); 1996 1997 ret = hclgevf_misc_irq_init(hdev); 1998 if (ret) { 1999 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2000 ret); 2001 goto err_misc_irq_init; 2002 } 2003 2004 ret = hclgevf_configure(hdev); 2005 if (ret) { 2006 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2007 goto err_config; 2008 } 2009 2010 ret = hclgevf_alloc_tqps(hdev); 2011 if (ret) { 2012 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2013 goto err_config; 2014 } 2015 2016 ret = hclgevf_set_handle_info(hdev); 2017 if (ret) { 2018 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); 2019 goto err_config; 2020 } 2021 2022 /* Initialize RSS for this VF */ 2023 ret = hclgevf_rss_init_hw(hdev); 2024 if (ret) { 2025 dev_err(&hdev->pdev->dev, 2026 "failed(%d) to initialize RSS\n", ret); 2027 goto err_config; 2028 } 2029 2030 ret = hclgevf_init_vlan_config(hdev); 2031 if (ret) { 2032 dev_err(&hdev->pdev->dev, 2033 "failed(%d) to initialize VLAN config\n", ret); 2034 goto err_config; 2035 } 2036 2037 pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); 2038 2039 return 0; 2040 2041 err_config: 2042 hclgevf_misc_irq_uninit(hdev); 2043 err_misc_irq_init: 2044 hclgevf_state_uninit(hdev); 2045 hclgevf_uninit_msi(hdev); 2046 err_query_vf: 2047 hclgevf_cmd_uninit(hdev); 2048 err_cmd_init: 2049 hclgevf_pci_uninit(hdev); 2050 return ret; 2051 } 2052 2053 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 2054 { 2055 hclgevf_state_uninit(hdev); 2056 hclgevf_misc_irq_uninit(hdev); 2057 hclgevf_cmd_uninit(hdev); 2058 hclgevf_uninit_msi(hdev); 2059 hclgevf_pci_uninit(hdev); 2060 } 2061 2062 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 2063 { 2064 struct pci_dev *pdev = ae_dev->pdev; 2065 int ret; 2066 2067 ret = hclgevf_alloc_hdev(ae_dev); 2068 if (ret) { 2069 dev_err(&pdev->dev, "hclge device allocation failed\n"); 2070 return ret; 2071 } 2072 2073 ret = hclgevf_init_hdev(ae_dev->priv); 2074 if (ret) 2075 dev_err(&pdev->dev, "hclge device initialization failed\n"); 2076 2077 return ret; 2078 } 2079 2080 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 2081 { 2082 struct hclgevf_dev *hdev = ae_dev->priv; 2083 2084 hclgevf_uninit_hdev(hdev); 2085 ae_dev->priv = NULL; 2086 } 2087 2088 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 2089 { 2090 struct hnae3_handle *nic = &hdev->nic; 2091 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 2092 2093 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 2094 } 2095 2096 /** 2097 * hclgevf_get_channels - Get the current channels enabled and max supported. 
/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
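
/* Callbacks exposed to the hnae3 framework through ae_algovf below;
 * upper-layer clients (e.g. the hns3 ENET driver) reach the VF hardware
 * via these ops.
 */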
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);