// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

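/* hnae3 .update_stats hook. The VF keeps only the per-TQP RCB packet
 * counters, refreshed from firmware by hclgevf_tqps_update_stats() above
 * (one query descriptor per queue per direction).
 */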
static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	/* if this is an ongoing reset then we need to re-allocate the TQPs,
	 * since we cannot assume we would get the same number of TQPs back
	 * from the PF
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, hdev->htqp);

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
				 i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
	/* if this is an ongoing reset then we need to re-allocate the hnae
	 * queues as well, since the number of TQPs from the PF might have
	 * changed.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, kinfo->tqp);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

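/* Program the RSS hash algorithm and key. The key is pushed in three
 * HCLGEVF_OPC_RSS_GENERIC_CONFIG descriptors: the first two chunks carry
 * HCLGEVF_RSS_HASH_KEY_NUM bytes each, the last carries the remainder of
 * HCLGEVF_RSS_KEY_SIZE; hash_config encodes the algorithm and chunk offset.
 */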
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

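/* ethtool .get_rxfh hook: report the RSS configuration from the driver's
 * shadow copy. Only hardware revision 0x21 and later exposes the hash key
 * and algorithm to the VF; older revisions report just the indirection
 * table.
 */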
static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

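/* Translate the ethtool RXH_* flags in nfc->data into the driver's
 * HCLGEVF_*_BIT tuple fields; for SCTP flows the verification tag bit is
 * hashed as well.
 */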
static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

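/* Build and send the VF->PF mailbox message(s) that map/unmap the rings in
 * ring_chain to a vector. Ring entries are packed into a message until it is
 * full (or the chain ends), at which point it is flushed and a fresh message
 * is started; req->msg[2] carries the entry count of each message.
 */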
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
				    HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		    HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		    HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

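/* Promiscuous mode is requested from the PF over the mailbox: msg[0] is the
 * opcode, msg[1] and msg[2] the unicast/multicast enables.
 */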
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				    bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

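/* hnae3 .set_mac_addr hook: the message carries the new MAC followed by the
 * current one so the PF can validate the change; the first configuration is
 * sent as an ADD rather than a MODIFY.
 */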
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			     HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	return client->ops->reset_notify(handle, type);
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS	500
#define HCLGEVF_RESET_WAIT_CNT	20
	u32 val, cnt = 0;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
	while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
		msleep(HCLGEVF_RESET_WAIT_MS);
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
		cnt++;
	}

	/* hardware completion status should be available by this time */
	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "couldn't get reset done status from h/w, timeout!\n");
		return -EBUSY;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by the PF. Yes,
	 * this also means we might end up waiting a bit more even for a VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

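/* Rebuild the device state after a reset: tear down the nic client, re-run
 * hclgevf_init_hdev() (which skips the PCI/MSI/state steps while an ongoing
 * reset is flagged) and bring the client back up.
 */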
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

	/* re-initialize the hclge device */
	ret = hclgevf_init_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

	return 0;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->reset_count++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

	rtnl_unlock();

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);

		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
		rtnl_lock();
		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

		rtnl_unlock();
		return ret;
	}

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

	/* bring up the nic to enable TX/RX again */
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

	rtnl_unlock();

	return ret;
}

static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
	int status;
	u8 respmsg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
				      0, false, &respmsg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF reset request to PF failed(=%d)\n", status);

	return status;
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		handle->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		handle->reset_level = HNAE3_VF_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	handle->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

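/* Vector 0 (HCLGEVF_MISC_VECTOR_NUM) is reserved for misc/mailbox events;
 * hclgevf_get_vector() hands out ring vectors starting from
 * HCLGEVF_MISC_VECTOR_NUM + 1.
 */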
static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

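/* Reset work: HCLGEVF_RESET_PENDING means the PF has asserted (or is about
 * to assert) the hardware reset and we only poll for completion, while
 * HCLGEVF_RESET_REQUESTED means this VF still has to ask the PF for a reset
 * via the mailbox.
 */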
static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		ret = hclgevf_reset(hdev);
		if (ret)
			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of the below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       a VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. a problem in TX due to some other lower layer (e.g.
		 *       the link layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of the PF for this reset
		 * request. The PF will ack the 1b and 2 cases, but we will not
		 * get any intimation about 1a from the PF, as the cmdq would
		 * be in an unreliable state i.e. mailbox communication between
		 * PF and VF would be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			/* request PF for resetting this VF via mailbox */
			ret = hclgevf_do_reset(hdev);
			if (ret)
				dev_warn(&hdev->pdev->dev,
					 "VF rst fail, stack will call\n");
		}
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

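/* Misc (vector 0) interrupt handler: mask the vector, dispatch mailbox
 * events to hclgevf_mbx_handler(), clear the event source and unmask.
 * Unknown sources are only logged via dev_dbg.
 */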
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	hclgevf_mbx_handler(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev = ae_dev->priv;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
		netdev_rss_key_fill(rss_cfg->rss_hash_key,
				    HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other VLAN config (like VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return;

	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

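/* Allocate MSI/MSI-X vectors. When RoCE is supported, at least
 * roce_base_msix_offset + 1 vectors are requested and only MSI-X is used;
 * otherwise plain MSI is an acceptable fallback.
 */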
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	/* check if we need to skip initialization of PCI. This happens when
	 * the device is undergoing a VF reset; otherwise the PCI interface
	 * needs to be (re-)initialized, i.e. when the device is not going
	 * through *any* reset or is actually undergoing a full reset.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

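/* Query interrupt resources from firmware. With RoCE supported, NIC vectors
 * are laid out before RoCE vectors, so num_msi is the RoCE vector count plus
 * the firmware-reported RoCE MSI-X offset; otherwise it comes straight from
 * vf_intr_vector_number.
 */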
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and RoCE vectors, and the NIC
		 * vectors are queued before the RoCE vectors. The offset is
		 * fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	/* check if the device is undergoing a full reset (i.e. PCIe as well) */
	if (hclgevf_dev_ongoing_full_reset(hdev)) {
		dev_warn(&pdev->dev, "device is going full reset\n");
		hclgevf_uninit_hdev(hdev);
	}

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_query_vf;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_query_vf;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_query_vf:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

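/* Upper bound for combined ethtool channels: rss_size_max queues per TC,
 * clamped to the number of TQPs the PF actually granted this VF.
 */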
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->reset_count;
}
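/* Dispatch sketch (illustrative only): upper layers such as the hns3 ENET
 * driver invoke the callbacks below indirectly through the handle, along
 * the lines of:
 *
 *	if (handle->ae_algo->ops->get_status)
 *		link_up = handle->ae_algo->ops->get_status(handle);
 *
 * The ops table that follows binds the VF implementations into that
 * dispatch path.
 */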
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);
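/* Load/unload summary (assumption, based on the HNAE3 registration model):
 * hclgevf_init() registers ae_algovf with the framework, which matches the
 * PCI IDs in ae_algovf_pci_tbl against known ae_devs and calls back into
 * hclgevf_init_ae_dev() for each match; hclgevf_exit() reverses this by
 * unregistering the algo.
 */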