// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}
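
/* Refresh the per-TQP packet counters from firmware. The net_stats
 * argument is not filled in here; only the TQP shadow statistics that
 * back hclgevf_get_stats() are updated.
 */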
static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
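
/* Write the RSS hash algorithm and hash key to hardware. The key is
 * programmed HCLGEVF_RSS_HASH_KEY_NUM bytes at a time over three
 * HCLGEVF_OPC_RSS_GENERIC_CONFIG descriptors, with the key offset field
 * in hash_config selecting which chunk each descriptor carries.
 */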
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
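
/* Map (en == true) or unmap a chain of rings to/from an interrupt vector
 * through the VF-to-PF mailbox. Each command carries the op type, the
 * vector id and a ring count, followed by per-ring {type, tqp index,
 * GL index} tuples; chains longer than one mailbox message can hold are
 * split across multiple commands.
 */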
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		    HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		    HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				    bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
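
/* Request a unicast MAC address change via the PF. The mailbox payload
 * carries the new address followed by the currently programmed one so
 * the PF can locate the entry to replace; is_first selects the ADD
 * subcode (no old entry yet) instead of MODIFY.
 */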
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN	5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	return client->ops->reset_notify(handle, type);
}
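
/* Poll the function-reset status register until hardware clears the
 * reset-pending bit: up to HCLGEVF_RESET_WAIT_CNT polls spaced
 * HCLGEVF_RESET_WAIT_MS apart (20 * 500ms = 10s) before timing out.
 */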
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS	500
#define HCLGEVF_RESET_WAIT_CNT	20
	u32 val, cnt = 0;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
	while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
		msleep(HCLGEVF_RESET_WAIT_MS);
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
		cnt++;
	}

	/* hardware completion status should be available by this time */
	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "couldn't get reset done status from h/w, timeout!\n");
		return -EBUSY;
	}

	/* wait a bit more to let the reset of the stack complete. This
	 * might happen in case reset assertion was made by PF. Yes, this
	 * also means we might end up waiting a bit more even for VF reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

	return 0;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->reset_count++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

	rtnl_unlock();

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);

		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
		rtnl_lock();
		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

		rtnl_unlock();
		return ret;
	}

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

	/* bring up the nic to enable TX/RX again */
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

	rtnl_unlock();

	return ret;
}

static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
	int status;
	u8 respmsg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
				      0, false, &respmsg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF reset request to PF failed(=%d)\n", status);

	return status;
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}
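
/* Vector 0 (HCLGEVF_MISC_VECTOR_NUM) is reserved for misc interrupts
 * such as mailbox events; record its IRQ number and doorbell address
 * and account for it in the MSI-X bookkeeping.
 */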
static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		ret = hclgevf_reset(hdev);
		if (ret)
			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect above cases than to
		 * react to the response of PF for this reset request. PF will
		 * ack the
		 * 1b and 2. cases but we will not get any intimation about 1a
		 * from PF as cmdq would be in unreliable state i.e. mailbox
		 * communication between PF and VF would be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			hdev->reset_level = HNAE3_VF_FULL_RESET;

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			/* request PF for resetting this VF via mailbox */
			ret = hclgevf_do_reset(hdev);
			if (ret)
				dev_warn(&hdev->pdev->dev,
					 "VF rst fail, stack will call\n");
		}
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}
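
/* Enable or disable the misc vector's interrupt by writing 1 or 0 to
 * its doorbell address.
 */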
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	hclgevf_mbx_handler(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
		netdev_rss_key_fill(rss_cfg->rss_hash_key,
				    HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and Roce vectors, NIC vectors
		 * are queued before Roce vectors. The offset is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}
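
/* Entry point called by the hnae3 framework when the AE device is
 * registered: allocate the VF private structure and run the full
 * initialization sequence above.
 */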
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}
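/* Worked example (numbers assumed for illustration): with rss_size_max
 * = 16, num_tc = 2 and num_tqps = 24, hclgevf_get_max_channels() returns
 * min(16 * 2, 24) = 24, so ethtool sees max_combined = 24 while
 * combined_count reports the currently allocated hdev->num_tqps.
 */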
static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->reset_count;
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}

module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);