// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}
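
/* Read the per-queue RX and TX packet counters from firmware over the
 * command queue and accumulate them into the driver's TQP statistics.
 */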
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}
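
/* Allocate one hclgevf_tqp per hardware queue and point each queue's
 * io_base at its register window inside the mapped PCI bar.
 */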
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}
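
/* Hand out up to vector_num unused MSI-X vectors to the client. The scan
 * starts past vector 0, which is reserved for misc (mailbox/reset) events.
 */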
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
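
/* Configure the RSS hash algorithm and hash key. The key does not fit in
 * one descriptor, so it is written in HCLGEVF_RSS_HASH_KEY_NUM byte
 * chunks spread over three generic-config descriptors.
 */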
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}
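
/* ethtool .get_rxfh/.set_rxfh backends. The hash key and algorithm are
 * only configurable on revision 0x21 and newer hardware; the indirection
 * table is kept in a shadow copy and flushed to hardware on update.
 */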
static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}
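
/* Translate an ethtool RXNFC hash request into per-flow-type tuple
 * enable bits and push the whole input-tuple table to hardware.
 */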
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
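
/* Build HCLGEVF_OPC_MBX_VF_TO_PF requests that (un)map a chain of rings
 * onto an interrupt vector. Ring nodes are packed into the mailbox
 * payload, which is flushed whenever it fills up or the chain ends.
 */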
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector = %d, ret = %d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}
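
/* Ask the PF, via the mailbox, to switch unicast/multicast promiscuous
 * mode for this VF.
 */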
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				    bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
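
/* Change the VF MAC address. Both the new and the current address are
 * carried in the mailbox message; the local shadow copy is updated only
 * after the PF acknowledges the change.
 */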
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN	5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}
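
/* Reset a TQP: the queue is disabled first so that no traffic is in
 * flight when the queue reset request reaches the PF.
 */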
static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable the vf queue before sending the queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev,
			"flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the stack reset complete. This can
	 * happen when the reset was asserted by the PF; it also means we may
	 * end up waiting a bit longer even for a VF reset.
	 */
	msleep(5000);

	return 0;
}
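
/* Rebuild the stack after a reset: uninitialize the nic client,
 * re-initialize the hclgevf device, then bring the client back up.
 */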
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return 0;
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if the device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	/* When the VF reset fails, only the higher-level reset asserted by
	 * the PF can restore it, so re-initialize the command queue to be
	 * able to receive that higher-level reset event.
	 */
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");

	return ret;
}
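
/* Pick the highest-priority reset level among those pending in *addr and
 * clear the lower-priority requests it supersedes.
 */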
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* The PF has intimated that it is about to reset the
		 * hardware. We now have to poll & check if the hardware has
		 * actually completed the reset sequence. On hardware reset
		 * completion, the VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of the below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. problem in TX due to other lower layers (for example,
		 *       the link layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There is no clear way to detect the above cases other
		 * than to react to the PF's response to this reset request.
		 * The PF will ack cases 1b and 2, but we will get no
		 * intimation about 1a from the PF as the cmdq would be in an
		 * unreliable state, i.e. mailbox communication between PF and
		 * VF would be broken.
		 */

		/* if we never get into the pending state it means either:
		 * 1. PF is not receiving our request, which could be due to
		 *    IMP reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try
		 * resetting our PCIe + stack and see if it alleviates the
		 * problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}
static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(u8));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. The PF should be able to push
	 * such updates to the VF in the future, so this may be removed later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}
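
/* Vector 0 interrupt demultiplexing: decide whether the interrupt was a
 * reset notification or a mailbox (CMDQ RX) event.
 */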
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev = ae_dev->priv;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
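
/* Enable or disable hardware GRO; this is a no-op on hardware without
 * GRO support.
 */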
static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
		netdev_rss_key_fill(rss_cfg->rss_hash_key,
				    HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}
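
/* Keep-alive handling: tell the PF whether this VF driver is alive.
 * client_start arms a 2 second keep-alive timer, client_stop disarms it.
 */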
static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = alive ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
				    0, &msg_data, 1, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
	return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);

	del_timer_sync(&hdev->keep_alive_timer);
	cancel_work_sync(&hdev->keep_alive_task);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}
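
/* Register the misc (vector 0) IRQ used for mailbox and reset events,
 * clear any stale event cause, then unmask the vector.
 */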
	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector (vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector (vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

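/* basic PCI bring-up: enable the device, set a 64-bit DMA mask, claim the
 * regions and map the register space in BAR 2
 */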
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and RoCE vectors, and the NIC
		 * vectors are queued before the RoCE vectors. The offset is
		 * fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

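/* a full VF reset tears down MSI/MSI-X, so the vectors and the misc IRQ
 * must be released and set up again before the device is used
 */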
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd init failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

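	/* make the allocated queues visible to the hnae3 layer */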
	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		hclgevf_pci_uninit(hdev);
	}

	hclgevf_cmd_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	hdev = ae_dev->priv;
	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

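/* the channel count is bounded by both the per-TC RSS size and the number
 * of TQPs assigned to this VF
 */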
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->reset_count;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

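/* dump registers for ethtool: the cmdq, common, per-ring and per-vector
 * blocks are each padded to a full line with separator words
 */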
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_num, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetch the per-VF register values from the VF's PCIe register space */
	reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

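/* callbacks exported to the hnae3 framework */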
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);