// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}
static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (u8 *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
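/* Set up the knic (kernel NIC) private info for this VF: the number of
 * usable TQPs is derived from the TC bitmap reported by the PF, with
 * the per-TC RSS size capped at rss_size_max.
 */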
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
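/* The RSS hash key is downloaded to the hardware in three
 * HCLGEVF_OPC_RSS_GENERIC_CONFIG descriptors: two full chunks of
 * HCLGEVF_RSS_HASH_KEY_NUM bytes each, then a final chunk carrying the
 * remainder of HCLGEVF_RSS_KEY_SIZE.
 */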
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
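/* Ring-to-vector (un)mapping is requested from the PF over the mailbox:
 * msg[0] carries the map/unmap opcode, msg[1] the vector id and msg[2]
 * the ring count, then each ring node occupies
 * HCLGE_MBX_RING_NODE_VARIABLE_NUM bytes (ring type, tqp index, GL index).
 * Chains too long for one command are split across multiple commands.
 */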
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		    HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		    HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector = %d, ret = %d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				    bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
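/* Changing the MAC address must go through the PF: the mailbox message
 * carries the new address followed by the currently programmed one, so
 * the PF can replace the old MAC-VLAN entry when modifying an address.
 */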
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}
static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* Wait a bit more for the stack reset to complete. This can happen
	 * when the reset assertion was made by the PF; it also means we may
	 * end up waiting a bit longer even for a VF reset.
	 */
	msleep(5000);

	return 0;
}
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return 0;
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}
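/* VF reset flow: bring the nic client down, notify the PF (for a VF
 * function reset) and poll HCLGEVF_RST_ING until the hardware reports
 * completion, then re-init the hclge device and bring the client back
 * up. The client up/down notifications are made under rtnl_lock.
 */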
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	/* When a VF reset fails, only a higher-level reset asserted by the
	 * PF can restore it, so re-initialize the command queue to be able
	 * to receive that reset event.
	 */
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");

	return ret;
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}
static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. problem in TX due to another lower layer (e.g. link
		 *       layer not functioning properly).
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the PF's response to this reset request.
		 * PF will ack cases 1b and 2, but we will not get any
		 * intimation about 1a from PF as cmdq would be in an
		 * unreliable state, i.e. mailbox communication between PF
		 * and VF would be broken.
		 */

		/* if we never get into the pending state it means either:
		 * 1. PF is not receiving our request, which could be due to
		 *    IMP reset
		 * 2. PF is screwed
		 * We cannot do much for case 2, but to check for case 1 we
		 * can try a full reset of our PCIe + stack and see if it
		 * alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}
static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. The PF should be able to push
	 * such updates to the VF in the future, so this may be removed later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}
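/* Vector0 multiplexes the reset and mailbox (CMDQ RX) event sources:
 * the misc IRQ handler masks the vector, decodes the cause, schedules
 * the reset task or runs the mailbox handler, then clears the event
 * source and unmasks the vector again.
 */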
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
		netdev_rss_key_fill(rss_cfg->rss_hash_key,
				    HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}

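/* Entry points called by the hnae3 framework when an ae_dev is bound to or
 * unbound from this algorithm: allocate the VF private structure, then run
 * the full hardware bring-up in hclgevf_init_hdev().
 */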
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->reset_count;
}

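/* Operations exported to the hnae3 framework; the NIC and client layers
 * reach the VF hardware through this table.
 */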
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);