1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 15 static struct hnae3_ae_algo ae_algovf; 16 17 static const struct pci_device_id ae_algovf_pci_tbl[] = { 18 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 19 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 20 /* required last entry */ 21 {0, } 22 }; 23 24 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 25 26 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 27 HCLGEVF_CMDQ_TX_ADDR_H_REG, 28 HCLGEVF_CMDQ_TX_DEPTH_REG, 29 HCLGEVF_CMDQ_TX_TAIL_REG, 30 HCLGEVF_CMDQ_TX_HEAD_REG, 31 HCLGEVF_CMDQ_RX_ADDR_L_REG, 32 HCLGEVF_CMDQ_RX_ADDR_H_REG, 33 HCLGEVF_CMDQ_RX_DEPTH_REG, 34 HCLGEVF_CMDQ_RX_TAIL_REG, 35 HCLGEVF_CMDQ_RX_HEAD_REG, 36 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 37 HCLGEVF_CMDQ_INTR_STS_REG, 38 HCLGEVF_CMDQ_INTR_EN_REG, 39 HCLGEVF_CMDQ_INTR_GEN_REG}; 40 41 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 42 HCLGEVF_RST_ING, 43 HCLGEVF_GRO_EN_REG}; 44 45 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 46 HCLGEVF_RING_RX_ADDR_H_REG, 47 HCLGEVF_RING_RX_BD_NUM_REG, 48 HCLGEVF_RING_RX_BD_LENGTH_REG, 49 HCLGEVF_RING_RX_MERGE_EN_REG, 50 HCLGEVF_RING_RX_TAIL_REG, 51 HCLGEVF_RING_RX_HEAD_REG, 52 HCLGEVF_RING_RX_FBD_NUM_REG, 53 HCLGEVF_RING_RX_OFFSET_REG, 54 HCLGEVF_RING_RX_FBD_OFFSET_REG, 55 HCLGEVF_RING_RX_STASH_REG, 56 HCLGEVF_RING_RX_BD_ERR_REG, 57 HCLGEVF_RING_TX_ADDR_L_REG, 58 HCLGEVF_RING_TX_ADDR_H_REG, 59 HCLGEVF_RING_TX_BD_NUM_REG, 60 HCLGEVF_RING_TX_PRIORITY_REG, 61 HCLGEVF_RING_TX_TC_REG, 62 HCLGEVF_RING_TX_MERGE_EN_REG, 63 HCLGEVF_RING_TX_TAIL_REG, 64 HCLGEVF_RING_TX_HEAD_REG, 65 HCLGEVF_RING_TX_FBD_NUM_REG, 66 HCLGEVF_RING_TX_OFFSET_REG, 67 HCLGEVF_RING_TX_EBD_NUM_REG, 68 HCLGEVF_RING_TX_EBD_OFFSET_REG, 69 HCLGEVF_RING_TX_BD_ERR_REG, 70 HCLGEVF_RING_EN_REG}; 71 72 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 73 HCLGEVF_TQP_INTR_GL0_REG, 74 HCLGEVF_TQP_INTR_GL1_REG, 75 HCLGEVF_TQP_INTR_GL2_REG, 76 HCLGEVF_TQP_INTR_RL_REG}; 77 78 static inline struct hclgevf_dev *hclgevf_ae_get_hdev( 79 struct hnae3_handle *handle) 80 { 81 return container_of(handle, struct hclgevf_dev, nic); 82 } 83 84 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 85 { 86 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 87 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 88 struct hclgevf_desc desc; 89 struct hclgevf_tqp *tqp; 90 int status; 91 int i; 92 93 for (i = 0; i < kinfo->num_tqps; i++) { 94 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 95 hclgevf_cmd_setup_basic_desc(&desc, 96 HCLGEVF_OPC_QUERY_RX_STATUS, 97 true); 98 99 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 100 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 101 if (status) { 102 dev_err(&hdev->pdev->dev, 103 "Query tqp stat fail, status = %d,queue = %d\n", 104 status, i); 105 return status; 106 } 107 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 108 le32_to_cpu(desc.data[1]); 109 110 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 111 true); 112 113 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 114 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 115 if (status) { 116 dev_err(&hdev->pdev->dev, 117 "Query tqp stat fail, status = %d,queue = %d\n", 118 status, i); 119 return status; 120 } 121 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 122 le32_to_cpu(desc.data[1]); 123 } 124 125 return 0; 126 } 127 128 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 129 { 130 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 131 struct hclgevf_tqp *tqp; 132 u64 *buff = data; 133 int i; 134 135 for (i = 0; i < kinfo->num_tqps; i++) { 136 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 137 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 138 } 139 for (i = 0; i < kinfo->num_tqps; i++) { 140 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 141 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 142 } 143 144 return buff; 145 } 146 147 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 148 { 149 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 150 151 return kinfo->num_tqps * 2; 152 } 153 154 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 155 { 156 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 157 u8 *buff = data; 158 int i = 0; 159 160 for (i = 0; i < kinfo->num_tqps; i++) { 161 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 162 struct hclgevf_tqp, q); 163 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 164 tqp->index); 165 buff += ETH_GSTRING_LEN; 166 } 167 168 for (i = 0; i < kinfo->num_tqps; i++) { 169 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 170 struct hclgevf_tqp, q); 171 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 172 tqp->index); 173 buff += ETH_GSTRING_LEN; 174 } 175 176 return buff; 177 } 178 179 static void hclgevf_update_stats(struct hnae3_handle *handle, 180 struct net_device_stats *net_stats) 181 { 182 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 183 int status; 184 185 status = hclgevf_tqps_update_stats(handle); 186 if (status) 187 dev_err(&hdev->pdev->dev, 188 "VF update of TQPS stats fail, status = %d.\n", 189 status); 190 } 191 192 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 193 { 194 if (strset == ETH_SS_TEST) 195 return -EOPNOTSUPP; 196 else if (strset == ETH_SS_STATS) 197 return hclgevf_tqps_get_sset_count(handle, strset); 198 199 return 0; 200 } 201 202 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 203 u8 *data) 204 { 205 u8 *p = (char *)data; 206 207 if (strset == ETH_SS_STATS) 208 p = hclgevf_tqps_get_strings(handle, p); 209 } 210 211 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 212 { 213 hclgevf_tqps_get_stats(handle, data); 214 } 215 216 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 217 { 218 u8 resp_msg; 219 int status; 220 221 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 222 true, &resp_msg, sizeof(u8)); 223 if (status) { 224 dev_err(&hdev->pdev->dev, 225 "VF request to get TC info from PF failed %d", 226 status); 227 return status; 228 } 229 230 hdev->hw_tc_map = resp_msg; 231 232 return 0; 233 } 234 235 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 236 { 237 #define HCLGEVF_TQPS_RSS_INFO_LEN 8 238 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 239 int status; 240 241 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 242 true, resp_msg, 243 HCLGEVF_TQPS_RSS_INFO_LEN); 244 if (status) { 245 dev_err(&hdev->pdev->dev, 246 "VF request to get tqp info from PF failed %d", 247 status); 248 return status; 249 } 250 251 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 252 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 253 memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); 254 memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); 255 256 return 0; 257 } 258 259 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 260 { 261 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 262 u8 msg_data[2], resp_data[2]; 263 u16 qid_in_pf = 0; 264 int ret; 265 266 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 267 268 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, 269 2, true, resp_data, 2); 270 if (!ret) 271 qid_in_pf = *(u16 *)resp_data; 272 273 return qid_in_pf; 274 } 275 276 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 277 { 278 struct hclgevf_tqp *tqp; 279 int i; 280 281 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 282 sizeof(struct hclgevf_tqp), GFP_KERNEL); 283 if (!hdev->htqp) 284 return -ENOMEM; 285 286 tqp = hdev->htqp; 287 288 for (i = 0; i < hdev->num_tqps; i++) { 289 tqp->dev = &hdev->pdev->dev; 290 tqp->index = i; 291 292 tqp->q.ae_algo = &ae_algovf; 293 tqp->q.buf_size = hdev->rx_buf_len; 294 tqp->q.desc_num = hdev->num_desc; 295 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 296 i * HCLGEVF_TQP_REG_SIZE; 297 298 tqp++; 299 } 300 301 return 0; 302 } 303 304 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 305 { 306 struct hnae3_handle *nic = &hdev->nic; 307 struct hnae3_knic_private_info *kinfo; 308 u16 new_tqps = hdev->num_tqps; 309 int i; 310 311 kinfo = &nic->kinfo; 312 kinfo->num_tc = 0; 313 kinfo->num_desc = hdev->num_desc; 314 kinfo->rx_buf_len = hdev->rx_buf_len; 315 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 316 if (hdev->hw_tc_map & BIT(i)) 317 kinfo->num_tc++; 318 319 kinfo->rss_size 320 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 321 new_tqps = kinfo->rss_size * kinfo->num_tc; 322 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 323 324 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 325 sizeof(struct hnae3_queue *), GFP_KERNEL); 326 if (!kinfo->tqp) 327 return -ENOMEM; 328 329 for (i = 0; i < kinfo->num_tqps; i++) { 330 hdev->htqp[i].q.handle = &hdev->nic; 331 hdev->htqp[i].q.tqp_index = i; 332 kinfo->tqp[i] = &hdev->htqp[i].q; 333 } 334 335 return 0; 336 } 337 338 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 339 { 340 int status; 341 u8 resp_msg; 342 343 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 344 0, false, &resp_msg, sizeof(u8)); 345 if (status) 346 dev_err(&hdev->pdev->dev, 347 "VF failed to fetch link status(%d) from PF", status); 348 } 349 350 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 351 { 352 struct hnae3_handle *handle = &hdev->nic; 353 struct hnae3_client *client; 354 355 client = handle->client; 356 357 link_state = 358 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 359 360 if (link_state != hdev->hw.mac.link) { 361 client->ops->link_status_change(handle, !!link_state); 362 hdev->hw.mac.link = link_state; 363 } 364 } 365 366 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 367 { 368 struct hnae3_handle *nic = &hdev->nic; 369 int ret; 370 371 nic->ae_algo = &ae_algovf; 372 nic->pdev = hdev->pdev; 373 nic->numa_node_mask = hdev->numa_node_mask; 374 nic->flags |= HNAE3_SUPPORT_VF; 375 376 if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { 377 dev_err(&hdev->pdev->dev, "unsupported device type %d\n", 378 hdev->ae_dev->dev_type); 379 return -EINVAL; 380 } 381 382 ret = hclgevf_knic_setup(hdev); 383 if (ret) 384 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 385 ret); 386 return ret; 387 } 388 389 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 390 { 391 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 392 dev_warn(&hdev->pdev->dev, 393 "vector(vector_id %d) has been freed.\n", vector_id); 394 return; 395 } 396 397 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 398 hdev->num_msi_left += 1; 399 hdev->num_msi_used -= 1; 400 } 401 402 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 403 struct hnae3_vector_info *vector_info) 404 { 405 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 406 struct hnae3_vector_info *vector = vector_info; 407 int alloc = 0; 408 int i, j; 409 410 vector_num = min(hdev->num_msi_left, vector_num); 411 412 for (j = 0; j < vector_num; j++) { 413 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 414 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 415 vector->vector = pci_irq_vector(hdev->pdev, i); 416 vector->io_addr = hdev->hw.io_base + 417 HCLGEVF_VECTOR_REG_BASE + 418 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 419 hdev->vector_status[i] = 0; 420 hdev->vector_irq[i] = vector->vector; 421 422 vector++; 423 alloc++; 424 425 break; 426 } 427 } 428 } 429 hdev->num_msi_left -= alloc; 430 hdev->num_msi_used += alloc; 431 432 return alloc; 433 } 434 435 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 436 { 437 int i; 438 439 for (i = 0; i < hdev->num_msi; i++) 440 if (vector == hdev->vector_irq[i]) 441 return i; 442 443 return -EINVAL; 444 } 445 446 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 447 const u8 hfunc, const u8 *key) 448 { 449 struct hclgevf_rss_config_cmd *req; 450 struct hclgevf_desc desc; 451 int key_offset; 452 int key_size; 453 int ret; 454 455 req = (struct hclgevf_rss_config_cmd *)desc.data; 456 457 for (key_offset = 0; key_offset < 3; key_offset++) { 458 hclgevf_cmd_setup_basic_desc(&desc, 459 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 460 false); 461 462 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 463 req->hash_config |= 464 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 465 466 if (key_offset == 2) 467 key_size = 468 HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; 469 else 470 key_size = HCLGEVF_RSS_HASH_KEY_NUM; 471 472 memcpy(req->hash_key, 473 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 474 475 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 476 if (ret) { 477 dev_err(&hdev->pdev->dev, 478 "Configure RSS config fail, status = %d\n", 479 ret); 480 return ret; 481 } 482 } 483 484 return 0; 485 } 486 487 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 488 { 489 return HCLGEVF_RSS_KEY_SIZE; 490 } 491 492 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 493 { 494 return HCLGEVF_RSS_IND_TBL_SIZE; 495 } 496 497 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 498 { 499 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 500 struct hclgevf_rss_indirection_table_cmd *req; 501 struct hclgevf_desc desc; 502 int status; 503 int i, j; 504 505 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 506 507 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 508 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 509 false); 510 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 511 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 512 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 513 req->rss_result[j] = 514 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 515 516 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 517 if (status) { 518 dev_err(&hdev->pdev->dev, 519 "VF failed(=%d) to set RSS indirection table\n", 520 status); 521 return status; 522 } 523 } 524 525 return 0; 526 } 527 528 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 529 { 530 struct hclgevf_rss_tc_mode_cmd *req; 531 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 532 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 533 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 534 struct hclgevf_desc desc; 535 u16 roundup_size; 536 int status; 537 int i; 538 539 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 540 541 roundup_size = roundup_pow_of_two(rss_size); 542 roundup_size = ilog2(roundup_size); 543 544 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 545 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 546 tc_size[i] = roundup_size; 547 tc_offset[i] = rss_size * i; 548 } 549 550 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 551 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 552 hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 553 (tc_valid[i] & 0x1)); 554 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 555 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 556 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 557 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 558 } 559 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 560 if (status) 561 dev_err(&hdev->pdev->dev, 562 "VF failed(=%d) to set rss tc mode\n", status); 563 564 return status; 565 } 566 567 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 568 u8 *hfunc) 569 { 570 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 571 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 572 int i; 573 574 if (handle->pdev->revision >= 0x21) { 575 /* Get hash algorithm */ 576 if (hfunc) { 577 switch (rss_cfg->hash_algo) { 578 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 579 *hfunc = ETH_RSS_HASH_TOP; 580 break; 581 case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 582 *hfunc = ETH_RSS_HASH_XOR; 583 break; 584 default: 585 *hfunc = ETH_RSS_HASH_UNKNOWN; 586 break; 587 } 588 } 589 590 /* Get the RSS Key required by the user */ 591 if (key) 592 memcpy(key, rss_cfg->rss_hash_key, 593 HCLGEVF_RSS_KEY_SIZE); 594 } 595 596 if (indir) 597 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 598 indir[i] = rss_cfg->rss_indirection_tbl[i]; 599 600 return 0; 601 } 602 603 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 604 const u8 *key, const u8 hfunc) 605 { 606 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 607 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 608 int ret, i; 609 610 if (handle->pdev->revision >= 0x21) { 611 /* Set the RSS Hash Key if specififed by the user */ 612 if (key) { 613 switch (hfunc) { 614 case ETH_RSS_HASH_TOP: 615 rss_cfg->hash_algo = 616 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 617 break; 618 case ETH_RSS_HASH_XOR: 619 rss_cfg->hash_algo = 620 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 621 break; 622 case ETH_RSS_HASH_NO_CHANGE: 623 break; 624 default: 625 return -EINVAL; 626 } 627 628 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 629 key); 630 if (ret) 631 return ret; 632 633 /* Update the shadow RSS key with user specified qids */ 634 memcpy(rss_cfg->rss_hash_key, key, 635 HCLGEVF_RSS_KEY_SIZE); 636 } 637 } 638 639 /* update the shadow RSS table with user specified qids */ 640 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 641 rss_cfg->rss_indirection_tbl[i] = indir[i]; 642 643 /* update the hardware */ 644 return hclgevf_set_rss_indir_table(hdev); 645 } 646 647 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 648 { 649 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 650 651 if (nfc->data & RXH_L4_B_2_3) 652 hash_sets |= HCLGEVF_D_PORT_BIT; 653 else 654 hash_sets &= ~HCLGEVF_D_PORT_BIT; 655 656 if (nfc->data & RXH_IP_SRC) 657 hash_sets |= HCLGEVF_S_IP_BIT; 658 else 659 hash_sets &= ~HCLGEVF_S_IP_BIT; 660 661 if (nfc->data & RXH_IP_DST) 662 hash_sets |= HCLGEVF_D_IP_BIT; 663 else 664 hash_sets &= ~HCLGEVF_D_IP_BIT; 665 666 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 667 hash_sets |= HCLGEVF_V_TAG_BIT; 668 669 return hash_sets; 670 } 671 672 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 673 struct ethtool_rxnfc *nfc) 674 { 675 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 676 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 677 struct hclgevf_rss_input_tuple_cmd *req; 678 struct hclgevf_desc desc; 679 u8 tuple_sets; 680 int ret; 681 682 if (handle->pdev->revision == 0x20) 683 return -EOPNOTSUPP; 684 685 if (nfc->data & 686 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 687 return -EINVAL; 688 689 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 690 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 691 692 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 693 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 694 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 695 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 696 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 697 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 698 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 699 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 700 701 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 702 switch (nfc->flow_type) { 703 case TCP_V4_FLOW: 704 req->ipv4_tcp_en = tuple_sets; 705 break; 706 case TCP_V6_FLOW: 707 req->ipv6_tcp_en = tuple_sets; 708 break; 709 case UDP_V4_FLOW: 710 req->ipv4_udp_en = tuple_sets; 711 break; 712 case UDP_V6_FLOW: 713 req->ipv6_udp_en = tuple_sets; 714 break; 715 case SCTP_V4_FLOW: 716 req->ipv4_sctp_en = tuple_sets; 717 break; 718 case SCTP_V6_FLOW: 719 if ((nfc->data & RXH_L4_B_0_1) || 720 (nfc->data & RXH_L4_B_2_3)) 721 return -EINVAL; 722 723 req->ipv6_sctp_en = tuple_sets; 724 break; 725 case IPV4_FLOW: 726 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 727 break; 728 case IPV6_FLOW: 729 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 730 break; 731 default: 732 return -EINVAL; 733 } 734 735 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 736 if (ret) { 737 dev_err(&hdev->pdev->dev, 738 "Set rss tuple fail, status = %d\n", ret); 739 return ret; 740 } 741 742 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 743 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 744 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 745 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 746 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 747 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 748 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 749 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 750 return 0; 751 } 752 753 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 754 struct ethtool_rxnfc *nfc) 755 { 756 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 757 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 758 u8 tuple_sets; 759 760 if (handle->pdev->revision == 0x20) 761 return -EOPNOTSUPP; 762 763 nfc->data = 0; 764 765 switch (nfc->flow_type) { 766 case TCP_V4_FLOW: 767 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 768 break; 769 case UDP_V4_FLOW: 770 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 771 break; 772 case TCP_V6_FLOW: 773 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 774 break; 775 case UDP_V6_FLOW: 776 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 777 break; 778 case SCTP_V4_FLOW: 779 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 780 break; 781 case SCTP_V6_FLOW: 782 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 783 break; 784 case IPV4_FLOW: 785 case IPV6_FLOW: 786 tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 787 break; 788 default: 789 return -EINVAL; 790 } 791 792 if (!tuple_sets) 793 return 0; 794 795 if (tuple_sets & HCLGEVF_D_PORT_BIT) 796 nfc->data |= RXH_L4_B_2_3; 797 if (tuple_sets & HCLGEVF_S_PORT_BIT) 798 nfc->data |= RXH_L4_B_0_1; 799 if (tuple_sets & HCLGEVF_D_IP_BIT) 800 nfc->data |= RXH_IP_DST; 801 if (tuple_sets & HCLGEVF_S_IP_BIT) 802 nfc->data |= RXH_IP_SRC; 803 804 return 0; 805 } 806 807 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 808 struct hclgevf_rss_cfg *rss_cfg) 809 { 810 struct hclgevf_rss_input_tuple_cmd *req; 811 struct hclgevf_desc desc; 812 int ret; 813 814 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 815 816 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 817 818 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 819 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 820 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 821 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 822 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 823 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 824 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 825 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 826 827 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 828 if (ret) 829 dev_err(&hdev->pdev->dev, 830 "Configure rss input fail, status = %d\n", ret); 831 return ret; 832 } 833 834 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 835 { 836 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 837 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 838 839 return rss_cfg->rss_size; 840 } 841 842 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 843 int vector_id, 844 struct hnae3_ring_chain_node *ring_chain) 845 { 846 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 847 struct hnae3_ring_chain_node *node; 848 struct hclge_mbx_vf_to_pf_cmd *req; 849 struct hclgevf_desc desc; 850 int i = 0; 851 int status; 852 u8 type; 853 854 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 855 856 for (node = ring_chain; node; node = node->next) { 857 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 858 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 859 860 if (i == 0) { 861 hclgevf_cmd_setup_basic_desc(&desc, 862 HCLGEVF_OPC_MBX_VF_TO_PF, 863 false); 864 type = en ? 865 HCLGE_MBX_MAP_RING_TO_VECTOR : 866 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 867 req->msg[0] = type; 868 req->msg[1] = vector_id; 869 } 870 871 req->msg[idx_offset] = 872 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 873 req->msg[idx_offset + 1] = node->tqp_index; 874 req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, 875 HNAE3_RING_GL_IDX_M, 876 HNAE3_RING_GL_IDX_S); 877 878 i++; 879 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 880 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 881 HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 882 !node->next) { 883 req->msg[2] = i; 884 885 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 886 if (status) { 887 dev_err(&hdev->pdev->dev, 888 "Map TQP fail, status is %d.\n", 889 status); 890 return status; 891 } 892 i = 0; 893 hclgevf_cmd_setup_basic_desc(&desc, 894 HCLGEVF_OPC_MBX_VF_TO_PF, 895 false); 896 req->msg[0] = type; 897 req->msg[1] = vector_id; 898 } 899 } 900 901 return 0; 902 } 903 904 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 905 struct hnae3_ring_chain_node *ring_chain) 906 { 907 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 908 int vector_id; 909 910 vector_id = hclgevf_get_vector_index(hdev, vector); 911 if (vector_id < 0) { 912 dev_err(&handle->pdev->dev, 913 "Get vector index fail. ret =%d\n", vector_id); 914 return vector_id; 915 } 916 917 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 918 } 919 920 static int hclgevf_unmap_ring_from_vector( 921 struct hnae3_handle *handle, 922 int vector, 923 struct hnae3_ring_chain_node *ring_chain) 924 { 925 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 926 int ret, vector_id; 927 928 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 929 return 0; 930 931 vector_id = hclgevf_get_vector_index(hdev, vector); 932 if (vector_id < 0) { 933 dev_err(&handle->pdev->dev, 934 "Get vector index fail. ret =%d\n", vector_id); 935 return vector_id; 936 } 937 938 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 939 if (ret) 940 dev_err(&handle->pdev->dev, 941 "Unmap ring from vector fail. vector=%d, ret =%d\n", 942 vector_id, 943 ret); 944 945 return ret; 946 } 947 948 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 949 { 950 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 951 int vector_id; 952 953 vector_id = hclgevf_get_vector_index(hdev, vector); 954 if (vector_id < 0) { 955 dev_err(&handle->pdev->dev, 956 "hclgevf_put_vector get vector index fail. ret =%d\n", 957 vector_id); 958 return vector_id; 959 } 960 961 hclgevf_free_vector(hdev, vector_id); 962 963 return 0; 964 } 965 966 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 967 bool en_uc_pmc, bool en_mc_pmc) 968 { 969 struct hclge_mbx_vf_to_pf_cmd *req; 970 struct hclgevf_desc desc; 971 int status; 972 973 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 974 975 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); 976 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; 977 req->msg[1] = en_uc_pmc ? 1 : 0; 978 req->msg[2] = en_mc_pmc ? 1 : 0; 979 980 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 981 if (status) 982 dev_err(&hdev->pdev->dev, 983 "Set promisc mode fail, status is %d.\n", status); 984 985 return status; 986 } 987 988 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, 989 bool en_uc_pmc, bool en_mc_pmc) 990 { 991 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 992 993 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); 994 } 995 996 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, 997 int stream_id, bool enable) 998 { 999 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1000 struct hclgevf_desc desc; 1001 int status; 1002 1003 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1004 1005 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1006 false); 1007 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1008 req->stream_id = cpu_to_le16(stream_id); 1009 req->enable |= enable << HCLGEVF_TQP_ENABLE_B; 1010 1011 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1012 if (status) 1013 dev_err(&hdev->pdev->dev, 1014 "TQP enable fail, status =%d.\n", status); 1015 1016 return status; 1017 } 1018 1019 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1020 { 1021 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1022 struct hclgevf_tqp *tqp; 1023 int i; 1024 1025 for (i = 0; i < kinfo->num_tqps; i++) { 1026 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1027 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1028 } 1029 } 1030 1031 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1032 { 1033 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1034 1035 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1036 } 1037 1038 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1039 bool is_first) 1040 { 1041 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1042 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1043 u8 *new_mac_addr = (u8 *)p; 1044 u8 msg_data[ETH_ALEN * 2]; 1045 u16 subcode; 1046 int status; 1047 1048 ether_addr_copy(msg_data, new_mac_addr); 1049 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); 1050 1051 subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD : 1052 HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1053 1054 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1055 subcode, msg_data, ETH_ALEN * 2, 1056 true, NULL, 0); 1057 if (!status) 1058 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1059 1060 return status; 1061 } 1062 1063 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1064 const unsigned char *addr) 1065 { 1066 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1067 1068 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1069 HCLGE_MBX_MAC_VLAN_UC_ADD, 1070 addr, ETH_ALEN, false, NULL, 0); 1071 } 1072 1073 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1074 const unsigned char *addr) 1075 { 1076 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1077 1078 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1079 HCLGE_MBX_MAC_VLAN_UC_REMOVE, 1080 addr, ETH_ALEN, false, NULL, 0); 1081 } 1082 1083 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1084 const unsigned char *addr) 1085 { 1086 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1087 1088 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1089 HCLGE_MBX_MAC_VLAN_MC_ADD, 1090 addr, ETH_ALEN, false, NULL, 0); 1091 } 1092 1093 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1094 const unsigned char *addr) 1095 { 1096 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1097 1098 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1099 HCLGE_MBX_MAC_VLAN_MC_REMOVE, 1100 addr, ETH_ALEN, false, NULL, 0); 1101 } 1102 1103 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1104 __be16 proto, u16 vlan_id, 1105 bool is_kill) 1106 { 1107 #define HCLGEVF_VLAN_MBX_MSG_LEN 5 1108 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1109 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; 1110 1111 if (vlan_id > 4095) 1112 return -EINVAL; 1113 1114 if (proto != htons(ETH_P_8021Q)) 1115 return -EPROTONOSUPPORT; 1116 1117 msg_data[0] = is_kill; 1118 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 1119 memcpy(&msg_data[3], &proto, sizeof(proto)); 1120 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1121 HCLGE_MBX_VLAN_FILTER, msg_data, 1122 HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); 1123 } 1124 1125 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1126 { 1127 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1128 u8 msg_data; 1129 1130 msg_data = enable ? 1 : 0; 1131 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1132 HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, 1133 1, false, NULL, 0); 1134 } 1135 1136 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1137 { 1138 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1139 u8 msg_data[2]; 1140 int ret; 1141 1142 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 1143 1144 /* disable vf queue before send queue reset msg to PF */ 1145 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 1146 if (ret) 1147 return ret; 1148 1149 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 1150 2, true, NULL, 0); 1151 } 1152 1153 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1154 { 1155 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1156 1157 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu, 1158 sizeof(new_mtu), true, NULL, 0); 1159 } 1160 1161 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1162 enum hnae3_reset_notify_type type) 1163 { 1164 struct hnae3_client *client = hdev->nic_client; 1165 struct hnae3_handle *handle = &hdev->nic; 1166 int ret; 1167 1168 if (!client->ops->reset_notify) 1169 return -EOPNOTSUPP; 1170 1171 ret = client->ops->reset_notify(handle, type); 1172 if (ret) 1173 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1174 type, ret); 1175 1176 return ret; 1177 } 1178 1179 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 1180 { 1181 struct hclgevf_dev *hdev = ae_dev->priv; 1182 1183 set_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1184 } 1185 1186 static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev, 1187 unsigned long delay_us, 1188 unsigned long wait_cnt) 1189 { 1190 unsigned long cnt = 0; 1191 1192 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && 1193 cnt++ < wait_cnt) 1194 usleep_range(delay_us, delay_us * 2); 1195 1196 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { 1197 dev_err(&hdev->pdev->dev, 1198 "flr wait timeout\n"); 1199 return -ETIMEDOUT; 1200 } 1201 1202 return 0; 1203 } 1204 1205 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1206 { 1207 #define HCLGEVF_RESET_WAIT_US 20000 1208 #define HCLGEVF_RESET_WAIT_CNT 2000 1209 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1210 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1211 1212 u32 val; 1213 int ret; 1214 1215 /* wait to check the hardware reset completion status */ 1216 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1217 dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val); 1218 1219 if (hdev->reset_type == HNAE3_FLR_RESET) 1220 return hclgevf_flr_poll_timeout(hdev, 1221 HCLGEVF_RESET_WAIT_US, 1222 HCLGEVF_RESET_WAIT_CNT); 1223 1224 ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val, 1225 !(val & HCLGEVF_RST_ING_BITS), 1226 HCLGEVF_RESET_WAIT_US, 1227 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1228 1229 /* hardware completion status should be available by this time */ 1230 if (ret) { 1231 dev_err(&hdev->pdev->dev, 1232 "could'nt get reset done status from h/w, timeout!\n"); 1233 return ret; 1234 } 1235 1236 /* we will wait a bit more to let reset of the stack to complete. This 1237 * might happen in case reset assertion was made by PF. Yes, this also 1238 * means we might end up waiting bit more even for VF reset. 1239 */ 1240 msleep(5000); 1241 1242 return 0; 1243 } 1244 1245 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1246 { 1247 int ret; 1248 1249 /* uninitialize the nic client */ 1250 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1251 if (ret) 1252 return ret; 1253 1254 /* re-initialize the hclge device */ 1255 ret = hclgevf_reset_hdev(hdev); 1256 if (ret) { 1257 dev_err(&hdev->pdev->dev, 1258 "hclge device re-init failed, VF is disabled!\n"); 1259 return ret; 1260 } 1261 1262 /* bring up the nic client again */ 1263 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1264 if (ret) 1265 return ret; 1266 1267 return 0; 1268 } 1269 1270 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1271 { 1272 int ret = 0; 1273 1274 switch (hdev->reset_type) { 1275 case HNAE3_VF_FUNC_RESET: 1276 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 1277 0, true, NULL, sizeof(u8)); 1278 break; 1279 case HNAE3_FLR_RESET: 1280 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1281 break; 1282 default: 1283 break; 1284 } 1285 1286 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1287 1288 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", 1289 hdev->reset_type, ret); 1290 1291 return ret; 1292 } 1293 1294 static int hclgevf_reset(struct hclgevf_dev *hdev) 1295 { 1296 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1297 int ret; 1298 1299 /* Initialize ae_dev reset status as well, in case enet layer wants to 1300 * know if device is undergoing reset 1301 */ 1302 ae_dev->reset_type = hdev->reset_type; 1303 hdev->reset_count++; 1304 rtnl_lock(); 1305 1306 /* bring down the nic to stop any ongoing TX/RX */ 1307 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1308 if (ret) 1309 goto err_reset_lock; 1310 1311 rtnl_unlock(); 1312 1313 ret = hclgevf_reset_prepare_wait(hdev); 1314 if (ret) 1315 goto err_reset; 1316 1317 /* check if VF could successfully fetch the hardware reset completion 1318 * status from the hardware 1319 */ 1320 ret = hclgevf_reset_wait(hdev); 1321 if (ret) { 1322 /* can't do much in this situation, will disable VF */ 1323 dev_err(&hdev->pdev->dev, 1324 "VF failed(=%d) to fetch H/W reset completion status\n", 1325 ret); 1326 goto err_reset; 1327 } 1328 1329 rtnl_lock(); 1330 1331 /* now, re-initialize the nic client and ae device*/ 1332 ret = hclgevf_reset_stack(hdev); 1333 if (ret) { 1334 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1335 goto err_reset_lock; 1336 } 1337 1338 /* bring up the nic to enable TX/RX again */ 1339 ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1340 if (ret) 1341 goto err_reset_lock; 1342 1343 rtnl_unlock(); 1344 1345 hdev->last_reset_time = jiffies; 1346 ae_dev->reset_type = HNAE3_NONE_RESET; 1347 1348 return ret; 1349 err_reset_lock: 1350 rtnl_unlock(); 1351 err_reset: 1352 /* When VF reset failed, only the higher level reset asserted by PF 1353 * can restore it, so re-initialize the command queue to receive 1354 * this higher reset event. 1355 */ 1356 hclgevf_cmd_init(hdev); 1357 dev_err(&hdev->pdev->dev, "failed to reset VF\n"); 1358 1359 return ret; 1360 } 1361 1362 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1363 unsigned long *addr) 1364 { 1365 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1366 1367 /* return the highest priority reset level amongst all */ 1368 if (test_bit(HNAE3_VF_RESET, addr)) { 1369 rst_level = HNAE3_VF_RESET; 1370 clear_bit(HNAE3_VF_RESET, addr); 1371 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1372 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1373 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1374 rst_level = HNAE3_VF_FULL_RESET; 1375 clear_bit(HNAE3_VF_FULL_RESET, addr); 1376 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1377 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1378 rst_level = HNAE3_VF_PF_FUNC_RESET; 1379 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1380 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1381 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1382 rst_level = HNAE3_VF_FUNC_RESET; 1383 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1384 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 1385 rst_level = HNAE3_FLR_RESET; 1386 clear_bit(HNAE3_FLR_RESET, addr); 1387 } 1388 1389 return rst_level; 1390 } 1391 1392 static void hclgevf_reset_event(struct pci_dev *pdev, 1393 struct hnae3_handle *handle) 1394 { 1395 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1396 struct hclgevf_dev *hdev = ae_dev->priv; 1397 1398 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 1399 1400 if (hdev->default_reset_request) 1401 hdev->reset_level = 1402 hclgevf_get_reset_level(hdev, 1403 &hdev->default_reset_request); 1404 else 1405 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1406 1407 /* reset of this VF requested */ 1408 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1409 hclgevf_reset_task_schedule(hdev); 1410 1411 hdev->last_reset_time = jiffies; 1412 } 1413 1414 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1415 enum hnae3_reset_type rst_type) 1416 { 1417 struct hclgevf_dev *hdev = ae_dev->priv; 1418 1419 set_bit(rst_type, &hdev->default_reset_request); 1420 } 1421 1422 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 1423 { 1424 #define HCLGEVF_FLR_WAIT_MS 100 1425 #define HCLGEVF_FLR_WAIT_CNT 50 1426 struct hclgevf_dev *hdev = ae_dev->priv; 1427 int cnt = 0; 1428 1429 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1430 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1431 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); 1432 hclgevf_reset_event(hdev->pdev, NULL); 1433 1434 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && 1435 cnt++ < HCLGEVF_FLR_WAIT_CNT) 1436 msleep(HCLGEVF_FLR_WAIT_MS); 1437 1438 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) 1439 dev_err(&hdev->pdev->dev, 1440 "flr wait down timeout: %d\n", cnt); 1441 } 1442 1443 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1444 { 1445 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1446 1447 return hdev->fw_version; 1448 } 1449 1450 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1451 { 1452 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1453 1454 vector->vector_irq = pci_irq_vector(hdev->pdev, 1455 HCLGEVF_MISC_VECTOR_NUM); 1456 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1457 /* vector status always valid for Vector 0 */ 1458 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1459 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1460 1461 hdev->num_msi_left -= 1; 1462 hdev->num_msi_used += 1; 1463 } 1464 1465 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1466 { 1467 if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && 1468 !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { 1469 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1470 schedule_work(&hdev->rst_service_task); 1471 } 1472 } 1473 1474 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1475 { 1476 if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && 1477 !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { 1478 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1479 schedule_work(&hdev->mbx_service_task); 1480 } 1481 } 1482 1483 static void hclgevf_task_schedule(struct hclgevf_dev *hdev) 1484 { 1485 if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && 1486 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) 1487 schedule_work(&hdev->service_task); 1488 } 1489 1490 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) 1491 { 1492 /* if we have any pending mailbox event then schedule the mbx task */ 1493 if (hdev->mbx_event_pending) 1494 hclgevf_mbx_task_schedule(hdev); 1495 1496 if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) 1497 hclgevf_reset_task_schedule(hdev); 1498 } 1499 1500 static void hclgevf_service_timer(struct timer_list *t) 1501 { 1502 struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); 1503 1504 mod_timer(&hdev->service_timer, jiffies + 5 * HZ); 1505 1506 hclgevf_task_schedule(hdev); 1507 } 1508 1509 static void hclgevf_reset_service_task(struct work_struct *work) 1510 { 1511 struct hclgevf_dev *hdev = 1512 container_of(work, struct hclgevf_dev, rst_service_task); 1513 int ret; 1514 1515 if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1516 return; 1517 1518 clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1519 1520 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 1521 &hdev->reset_state)) { 1522 /* PF has initmated that it is about to reset the hardware. 1523 * We now have to poll & check if harware has actually completed 1524 * the reset sequence. On hardware reset completion, VF needs to 1525 * reset the client and ae device. 1526 */ 1527 hdev->reset_attempts = 0; 1528 1529 hdev->last_reset_time = jiffies; 1530 while ((hdev->reset_type = 1531 hclgevf_get_reset_level(hdev, &hdev->reset_pending)) 1532 != HNAE3_NONE_RESET) { 1533 ret = hclgevf_reset(hdev); 1534 if (ret) 1535 dev_err(&hdev->pdev->dev, 1536 "VF stack reset failed %d.\n", ret); 1537 } 1538 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1539 &hdev->reset_state)) { 1540 /* we could be here when either of below happens: 1541 * 1. reset was initiated due to watchdog timeout due to 1542 * a. IMP was earlier reset and our TX got choked down and 1543 * which resulted in watchdog reacting and inducing VF 1544 * reset. This also means our cmdq would be unreliable. 1545 * b. problem in TX due to other lower layer(example link 1546 * layer not functioning properly etc.) 1547 * 2. VF reset might have been initiated due to some config 1548 * change. 1549 * 1550 * NOTE: Theres no clear way to detect above cases than to react 1551 * to the response of PF for this reset request. PF will ack the 1552 * 1b and 2. cases but we will not get any intimation about 1a 1553 * from PF as cmdq would be in unreliable state i.e. mailbox 1554 * communication between PF and VF would be broken. 1555 */ 1556 1557 /* if we are never geting into pending state it means either: 1558 * 1. PF is not receiving our request which could be due to IMP 1559 * reset 1560 * 2. PF is screwed 1561 * We cannot do much for 2. but to check first we can try reset 1562 * our PCIe + stack and see if it alleviates the problem. 1563 */ 1564 if (hdev->reset_attempts > 3) { 1565 /* prepare for full reset of stack + pcie interface */ 1566 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 1567 1568 /* "defer" schedule the reset task again */ 1569 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1570 } else { 1571 hdev->reset_attempts++; 1572 1573 set_bit(hdev->reset_level, &hdev->reset_pending); 1574 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1575 } 1576 hclgevf_reset_task_schedule(hdev); 1577 } 1578 1579 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1580 } 1581 1582 static void hclgevf_mailbox_service_task(struct work_struct *work) 1583 { 1584 struct hclgevf_dev *hdev; 1585 1586 hdev = container_of(work, struct hclgevf_dev, mbx_service_task); 1587 1588 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1589 return; 1590 1591 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1592 1593 hclgevf_mbx_async_handler(hdev); 1594 1595 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1596 } 1597 1598 static void hclgevf_keep_alive_timer(struct timer_list *t) 1599 { 1600 struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer); 1601 1602 schedule_work(&hdev->keep_alive_task); 1603 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 1604 } 1605 1606 static void hclgevf_keep_alive_task(struct work_struct *work) 1607 { 1608 struct hclgevf_dev *hdev; 1609 u8 respmsg; 1610 int ret; 1611 1612 hdev = container_of(work, struct hclgevf_dev, keep_alive_task); 1613 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, 1614 0, false, &respmsg, sizeof(u8)); 1615 if (ret) 1616 dev_err(&hdev->pdev->dev, 1617 "VF sends keep alive cmd failed(=%d)\n", ret); 1618 } 1619 1620 static void hclgevf_service_task(struct work_struct *work) 1621 { 1622 struct hclgevf_dev *hdev; 1623 1624 hdev = container_of(work, struct hclgevf_dev, service_task); 1625 1626 /* request the link status from the PF. PF would be able to tell VF 1627 * about such updates in future so we might remove this later 1628 */ 1629 hclgevf_request_link_info(hdev); 1630 1631 hclgevf_deferred_task_schedule(hdev); 1632 1633 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1634 } 1635 1636 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 1637 { 1638 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 1639 } 1640 1641 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 1642 u32 *clearval) 1643 { 1644 u32 cmdq_src_reg, rst_ing_reg; 1645 1646 /* fetch the events from their corresponding regs */ 1647 cmdq_src_reg = hclgevf_read_dev(&hdev->hw, 1648 HCLGEVF_VECTOR0_CMDQ_SRC_REG); 1649 1650 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) { 1651 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1652 dev_info(&hdev->pdev->dev, 1653 "receive reset interrupt 0x%x!\n", rst_ing_reg); 1654 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 1655 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1656 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1657 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); 1658 *clearval = cmdq_src_reg; 1659 return HCLGEVF_VECTOR0_EVENT_RST; 1660 } 1661 1662 /* check for vector0 mailbox(=CMDQ RX) event source */ 1663 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 1664 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 1665 *clearval = cmdq_src_reg; 1666 return HCLGEVF_VECTOR0_EVENT_MBX; 1667 } 1668 1669 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); 1670 1671 return HCLGEVF_VECTOR0_EVENT_OTHER; 1672 } 1673 1674 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1675 { 1676 writel(en ? 1 : 0, vector->addr); 1677 } 1678 1679 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 1680 { 1681 enum hclgevf_evt_cause event_cause; 1682 struct hclgevf_dev *hdev = data; 1683 u32 clearval; 1684 1685 hclgevf_enable_vector(&hdev->misc_vector, false); 1686 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 1687 1688 switch (event_cause) { 1689 case HCLGEVF_VECTOR0_EVENT_RST: 1690 hclgevf_reset_task_schedule(hdev); 1691 break; 1692 case HCLGEVF_VECTOR0_EVENT_MBX: 1693 hclgevf_mbx_handler(hdev); 1694 break; 1695 default: 1696 break; 1697 } 1698 1699 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 1700 hclgevf_clear_event_cause(hdev, clearval); 1701 hclgevf_enable_vector(&hdev->misc_vector, true); 1702 } 1703 1704 return IRQ_HANDLED; 1705 } 1706 1707 static int hclgevf_configure(struct hclgevf_dev *hdev) 1708 { 1709 int ret; 1710 1711 hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE; 1712 1713 /* get queue configuration from PF */ 1714 ret = hclgevf_get_queue_info(hdev); 1715 if (ret) 1716 return ret; 1717 /* get tc configuration from PF */ 1718 return hclgevf_get_tc_info(hdev); 1719 } 1720 1721 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 1722 { 1723 struct pci_dev *pdev = ae_dev->pdev; 1724 struct hclgevf_dev *hdev = ae_dev->priv; 1725 1726 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 1727 if (!hdev) 1728 return -ENOMEM; 1729 1730 hdev->pdev = pdev; 1731 hdev->ae_dev = ae_dev; 1732 ae_dev->priv = hdev; 1733 1734 return 0; 1735 } 1736 1737 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 1738 { 1739 struct hnae3_handle *roce = &hdev->roce; 1740 struct hnae3_handle *nic = &hdev->nic; 1741 1742 roce->rinfo.num_vectors = hdev->num_roce_msix; 1743 1744 if (hdev->num_msi_left < roce->rinfo.num_vectors || 1745 hdev->num_msi_left == 0) 1746 return -EINVAL; 1747 1748 roce->rinfo.base_vector = hdev->roce_base_vector; 1749 1750 roce->rinfo.netdev = nic->kinfo.netdev; 1751 roce->rinfo.roce_io_base = hdev->hw.io_base; 1752 1753 roce->pdev = nic->pdev; 1754 roce->ae_algo = nic->ae_algo; 1755 roce->numa_node_mask = nic->numa_node_mask; 1756 1757 return 0; 1758 } 1759 1760 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 1761 { 1762 struct hclgevf_cfg_gro_status_cmd *req; 1763 struct hclgevf_desc desc; 1764 int ret; 1765 1766 if (!hnae3_dev_gro_supported(hdev)) 1767 return 0; 1768 1769 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 1770 false); 1771 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 1772 1773 req->gro_en = cpu_to_le16(en ? 1 : 0); 1774 1775 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1776 if (ret) 1777 dev_err(&hdev->pdev->dev, 1778 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 1779 1780 return ret; 1781 } 1782 1783 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 1784 { 1785 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1786 int i, ret; 1787 1788 rss_cfg->rss_size = hdev->rss_size_max; 1789 1790 if (hdev->pdev->revision >= 0x21) { 1791 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 1792 netdev_rss_key_fill(rss_cfg->rss_hash_key, 1793 HCLGEVF_RSS_KEY_SIZE); 1794 1795 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 1796 rss_cfg->rss_hash_key); 1797 if (ret) 1798 return ret; 1799 1800 rss_cfg->rss_tuple_sets.ipv4_tcp_en = 1801 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1802 rss_cfg->rss_tuple_sets.ipv4_udp_en = 1803 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1804 rss_cfg->rss_tuple_sets.ipv4_sctp_en = 1805 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1806 rss_cfg->rss_tuple_sets.ipv4_fragment_en = 1807 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1808 rss_cfg->rss_tuple_sets.ipv6_tcp_en = 1809 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1810 rss_cfg->rss_tuple_sets.ipv6_udp_en = 1811 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1812 rss_cfg->rss_tuple_sets.ipv6_sctp_en = 1813 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1814 rss_cfg->rss_tuple_sets.ipv6_fragment_en = 1815 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1816 1817 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 1818 if (ret) 1819 return ret; 1820 1821 } 1822 1823 /* Initialize RSS indirect table for each vport */ 1824 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 1825 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; 1826 1827 ret = hclgevf_set_rss_indir_table(hdev); 1828 if (ret) 1829 return ret; 1830 1831 return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); 1832 } 1833 1834 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 1835 { 1836 /* other vlan config(like, VLAN TX/RX offload) would also be added 1837 * here later 1838 */ 1839 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 1840 false); 1841 } 1842 1843 static int hclgevf_ae_start(struct hnae3_handle *handle) 1844 { 1845 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1846 1847 /* reset tqp stats */ 1848 hclgevf_reset_tqp_stats(handle); 1849 1850 hclgevf_request_link_info(hdev); 1851 1852 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1853 mod_timer(&hdev->service_timer, jiffies + HZ); 1854 1855 return 0; 1856 } 1857 1858 static void hclgevf_ae_stop(struct hnae3_handle *handle) 1859 { 1860 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1861 1862 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1863 1864 /* reset tqp stats */ 1865 hclgevf_reset_tqp_stats(handle); 1866 del_timer_sync(&hdev->service_timer); 1867 cancel_work_sync(&hdev->service_task); 1868 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1869 hclgevf_update_link_status(hdev, 0); 1870 } 1871 1872 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 1873 { 1874 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1875 u8 msg_data; 1876 1877 msg_data = alive ? 1 : 0; 1878 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE, 1879 0, &msg_data, 1, false, NULL, 0); 1880 } 1881 1882 static int hclgevf_client_start(struct hnae3_handle *handle) 1883 { 1884 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1885 1886 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 1887 return hclgevf_set_alive(handle, true); 1888 } 1889 1890 static void hclgevf_client_stop(struct hnae3_handle *handle) 1891 { 1892 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1893 int ret; 1894 1895 ret = hclgevf_set_alive(handle, false); 1896 if (ret) 1897 dev_warn(&hdev->pdev->dev, 1898 "%s failed %d\n", __func__, ret); 1899 1900 del_timer_sync(&hdev->keep_alive_timer); 1901 cancel_work_sync(&hdev->keep_alive_task); 1902 } 1903 1904 static void hclgevf_state_init(struct hclgevf_dev *hdev) 1905 { 1906 /* setup tasks for the MBX */ 1907 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); 1908 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1909 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1910 1911 /* setup tasks for service timer */ 1912 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); 1913 1914 INIT_WORK(&hdev->service_task, hclgevf_service_task); 1915 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1916 1917 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); 1918 1919 mutex_init(&hdev->mbx_resp.mbx_mutex); 1920 1921 /* bring the device down */ 1922 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1923 } 1924 1925 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 1926 { 1927 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1928 1929 if (hdev->service_timer.function) 1930 del_timer_sync(&hdev->service_timer); 1931 if (hdev->service_task.func) 1932 cancel_work_sync(&hdev->service_task); 1933 if (hdev->mbx_service_task.func) 1934 cancel_work_sync(&hdev->mbx_service_task); 1935 if (hdev->rst_service_task.func) 1936 cancel_work_sync(&hdev->rst_service_task); 1937 1938 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 1939 } 1940 1941 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 1942 { 1943 struct pci_dev *pdev = hdev->pdev; 1944 int vectors; 1945 int i; 1946 1947 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) 1948 vectors = pci_alloc_irq_vectors(pdev, 1949 hdev->roce_base_msix_offset + 1, 1950 hdev->num_msi, 1951 PCI_IRQ_MSIX); 1952 else 1953 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 1954 PCI_IRQ_MSI | PCI_IRQ_MSIX); 1955 1956 if (vectors < 0) { 1957 dev_err(&pdev->dev, 1958 "failed(%d) to allocate MSI/MSI-X vectors\n", 1959 vectors); 1960 return vectors; 1961 } 1962 if (vectors < hdev->num_msi) 1963 dev_warn(&hdev->pdev->dev, 1964 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 1965 hdev->num_msi, vectors); 1966 1967 hdev->num_msi = vectors; 1968 hdev->num_msi_left = vectors; 1969 hdev->base_msi_vector = pdev->irq; 1970 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 1971 1972 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 1973 sizeof(u16), GFP_KERNEL); 1974 if (!hdev->vector_status) { 1975 pci_free_irq_vectors(pdev); 1976 return -ENOMEM; 1977 } 1978 1979 for (i = 0; i < hdev->num_msi; i++) 1980 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 1981 1982 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 1983 sizeof(int), GFP_KERNEL); 1984 if (!hdev->vector_irq) { 1985 devm_kfree(&pdev->dev, hdev->vector_status); 1986 pci_free_irq_vectors(pdev); 1987 return -ENOMEM; 1988 } 1989 1990 return 0; 1991 } 1992 1993 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 1994 { 1995 struct pci_dev *pdev = hdev->pdev; 1996 1997 devm_kfree(&pdev->dev, hdev->vector_status); 1998 devm_kfree(&pdev->dev, hdev->vector_irq); 1999 pci_free_irq_vectors(pdev); 2000 } 2001 2002 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2003 { 2004 int ret = 0; 2005 2006 hclgevf_get_misc_vector(hdev); 2007 2008 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2009 0, "hclgevf_cmd", hdev); 2010 if (ret) { 2011 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2012 hdev->misc_vector.vector_irq); 2013 return ret; 2014 } 2015 2016 hclgevf_clear_event_cause(hdev, 0); 2017 2018 /* enable misc. vector(vector 0) */ 2019 hclgevf_enable_vector(&hdev->misc_vector, true); 2020 2021 return ret; 2022 } 2023 2024 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2025 { 2026 /* disable misc vector(vector 0) */ 2027 hclgevf_enable_vector(&hdev->misc_vector, false); 2028 synchronize_irq(hdev->misc_vector.vector_irq); 2029 free_irq(hdev->misc_vector.vector_irq, hdev); 2030 hclgevf_free_vector(hdev, 0); 2031 } 2032 2033 static int hclgevf_init_client_instance(struct hnae3_client *client, 2034 struct hnae3_ae_dev *ae_dev) 2035 { 2036 struct hclgevf_dev *hdev = ae_dev->priv; 2037 int ret; 2038 2039 switch (client->type) { 2040 case HNAE3_CLIENT_KNIC: 2041 hdev->nic_client = client; 2042 hdev->nic.client = client; 2043 2044 ret = client->ops->init_instance(&hdev->nic); 2045 if (ret) 2046 goto clear_nic; 2047 2048 hnae3_set_client_init_flag(client, ae_dev, 1); 2049 2050 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { 2051 struct hnae3_client *rc = hdev->roce_client; 2052 2053 ret = hclgevf_init_roce_base_info(hdev); 2054 if (ret) 2055 goto clear_roce; 2056 ret = rc->ops->init_instance(&hdev->roce); 2057 if (ret) 2058 goto clear_roce; 2059 2060 hnae3_set_client_init_flag(hdev->roce_client, ae_dev, 2061 1); 2062 } 2063 break; 2064 case HNAE3_CLIENT_UNIC: 2065 hdev->nic_client = client; 2066 hdev->nic.client = client; 2067 2068 ret = client->ops->init_instance(&hdev->nic); 2069 if (ret) 2070 goto clear_nic; 2071 2072 hnae3_set_client_init_flag(client, ae_dev, 1); 2073 break; 2074 case HNAE3_CLIENT_ROCE: 2075 if (hnae3_dev_roce_supported(hdev)) { 2076 hdev->roce_client = client; 2077 hdev->roce.client = client; 2078 } 2079 2080 if (hdev->roce_client && hdev->nic_client) { 2081 ret = hclgevf_init_roce_base_info(hdev); 2082 if (ret) 2083 goto clear_roce; 2084 2085 ret = client->ops->init_instance(&hdev->roce); 2086 if (ret) 2087 goto clear_roce; 2088 } 2089 2090 hnae3_set_client_init_flag(client, ae_dev, 1); 2091 break; 2092 default: 2093 return -EINVAL; 2094 } 2095 2096 return 0; 2097 2098 clear_nic: 2099 hdev->nic_client = NULL; 2100 hdev->nic.client = NULL; 2101 return ret; 2102 clear_roce: 2103 hdev->roce_client = NULL; 2104 hdev->roce.client = NULL; 2105 return ret; 2106 } 2107 2108 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2109 struct hnae3_ae_dev *ae_dev) 2110 { 2111 struct hclgevf_dev *hdev = ae_dev->priv; 2112 2113 /* un-init roce, if it exists */ 2114 if (hdev->roce_client) { 2115 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2116 hdev->roce_client = NULL; 2117 hdev->roce.client = NULL; 2118 } 2119 2120 /* un-init nic/unic, if this was not called by roce client */ 2121 if (client->ops->uninit_instance && hdev->nic_client && 2122 client->type != HNAE3_CLIENT_ROCE) { 2123 client->ops->uninit_instance(&hdev->nic, 0); 2124 hdev->nic_client = NULL; 2125 hdev->nic.client = NULL; 2126 } 2127 } 2128 2129 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2130 { 2131 struct pci_dev *pdev = hdev->pdev; 2132 struct hclgevf_hw *hw; 2133 int ret; 2134 2135 ret = pci_enable_device(pdev); 2136 if (ret) { 2137 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2138 return ret; 2139 } 2140 2141 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2142 if (ret) { 2143 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2144 goto err_disable_device; 2145 } 2146 2147 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2148 if (ret) { 2149 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2150 goto err_disable_device; 2151 } 2152 2153 pci_set_master(pdev); 2154 hw = &hdev->hw; 2155 hw->hdev = hdev; 2156 hw->io_base = pci_iomap(pdev, 2, 0); 2157 if (!hw->io_base) { 2158 dev_err(&pdev->dev, "can't map configuration register space\n"); 2159 ret = -ENOMEM; 2160 goto err_clr_master; 2161 } 2162 2163 return 0; 2164 2165 err_clr_master: 2166 pci_clear_master(pdev); 2167 pci_release_regions(pdev); 2168 err_disable_device: 2169 pci_disable_device(pdev); 2170 2171 return ret; 2172 } 2173 2174 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2175 { 2176 struct pci_dev *pdev = hdev->pdev; 2177 2178 pci_iounmap(pdev, hdev->hw.io_base); 2179 pci_clear_master(pdev); 2180 pci_release_regions(pdev); 2181 pci_disable_device(pdev); 2182 } 2183 2184 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2185 { 2186 struct hclgevf_query_res_cmd *req; 2187 struct hclgevf_desc desc; 2188 int ret; 2189 2190 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 2191 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2192 if (ret) { 2193 dev_err(&hdev->pdev->dev, 2194 "query vf resource failed, ret = %d.\n", ret); 2195 return ret; 2196 } 2197 2198 req = (struct hclgevf_query_res_cmd *)desc.data; 2199 2200 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { 2201 hdev->roce_base_msix_offset = 2202 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), 2203 HCLGEVF_MSIX_OFT_ROCEE_M, 2204 HCLGEVF_MSIX_OFT_ROCEE_S); 2205 hdev->num_roce_msix = 2206 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2207 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2208 2209 /* VF should have NIC vectors and Roce vectors, NIC vectors 2210 * are queued before Roce vectors. The offset is fixed to 64. 2211 */ 2212 hdev->num_msi = hdev->num_roce_msix + 2213 hdev->roce_base_msix_offset; 2214 } else { 2215 hdev->num_msi = 2216 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2217 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2218 } 2219 2220 return 0; 2221 } 2222 2223 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2224 { 2225 struct pci_dev *pdev = hdev->pdev; 2226 int ret = 0; 2227 2228 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2229 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2230 hclgevf_misc_irq_uninit(hdev); 2231 hclgevf_uninit_msi(hdev); 2232 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2233 } 2234 2235 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2236 pci_set_master(pdev); 2237 ret = hclgevf_init_msi(hdev); 2238 if (ret) { 2239 dev_err(&pdev->dev, 2240 "failed(%d) to init MSI/MSI-X\n", ret); 2241 return ret; 2242 } 2243 2244 ret = hclgevf_misc_irq_init(hdev); 2245 if (ret) { 2246 hclgevf_uninit_msi(hdev); 2247 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2248 ret); 2249 return ret; 2250 } 2251 2252 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2253 } 2254 2255 return ret; 2256 } 2257 2258 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2259 { 2260 struct pci_dev *pdev = hdev->pdev; 2261 int ret; 2262 2263 ret = hclgevf_pci_reset(hdev); 2264 if (ret) { 2265 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2266 return ret; 2267 } 2268 2269 ret = hclgevf_cmd_init(hdev); 2270 if (ret) { 2271 dev_err(&pdev->dev, "cmd failed %d\n", ret); 2272 return ret; 2273 } 2274 2275 ret = hclgevf_rss_init_hw(hdev); 2276 if (ret) { 2277 dev_err(&hdev->pdev->dev, 2278 "failed(%d) to initialize RSS\n", ret); 2279 return ret; 2280 } 2281 2282 ret = hclgevf_config_gro(hdev, true); 2283 if (ret) 2284 return ret; 2285 2286 ret = hclgevf_init_vlan_config(hdev); 2287 if (ret) { 2288 dev_err(&hdev->pdev->dev, 2289 "failed(%d) to initialize VLAN config\n", ret); 2290 return ret; 2291 } 2292 2293 dev_info(&hdev->pdev->dev, "Reset done\n"); 2294 2295 return 0; 2296 } 2297 2298 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 2299 { 2300 struct pci_dev *pdev = hdev->pdev; 2301 int ret; 2302 2303 ret = hclgevf_pci_init(hdev); 2304 if (ret) { 2305 dev_err(&pdev->dev, "PCI initialization failed\n"); 2306 return ret; 2307 } 2308 2309 ret = hclgevf_cmd_queue_init(hdev); 2310 if (ret) { 2311 dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret); 2312 goto err_cmd_queue_init; 2313 } 2314 2315 ret = hclgevf_cmd_init(hdev); 2316 if (ret) 2317 goto err_cmd_init; 2318 2319 /* Get vf resource */ 2320 ret = hclgevf_query_vf_resource(hdev); 2321 if (ret) { 2322 dev_err(&hdev->pdev->dev, 2323 "Query vf status error, ret = %d.\n", ret); 2324 goto err_cmd_init; 2325 } 2326 2327 ret = hclgevf_init_msi(hdev); 2328 if (ret) { 2329 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 2330 goto err_cmd_init; 2331 } 2332 2333 hclgevf_state_init(hdev); 2334 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2335 2336 ret = hclgevf_misc_irq_init(hdev); 2337 if (ret) { 2338 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2339 ret); 2340 goto err_misc_irq_init; 2341 } 2342 2343 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2344 2345 ret = hclgevf_configure(hdev); 2346 if (ret) { 2347 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2348 goto err_config; 2349 } 2350 2351 ret = hclgevf_alloc_tqps(hdev); 2352 if (ret) { 2353 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2354 goto err_config; 2355 } 2356 2357 ret = hclgevf_set_handle_info(hdev); 2358 if (ret) { 2359 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); 2360 goto err_config; 2361 } 2362 2363 ret = hclgevf_config_gro(hdev, true); 2364 if (ret) 2365 goto err_config; 2366 2367 /* Initialize RSS for this VF */ 2368 ret = hclgevf_rss_init_hw(hdev); 2369 if (ret) { 2370 dev_err(&hdev->pdev->dev, 2371 "failed(%d) to initialize RSS\n", ret); 2372 goto err_config; 2373 } 2374 2375 ret = hclgevf_init_vlan_config(hdev); 2376 if (ret) { 2377 dev_err(&hdev->pdev->dev, 2378 "failed(%d) to initialize VLAN config\n", ret); 2379 goto err_config; 2380 } 2381 2382 hdev->last_reset_time = jiffies; 2383 pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); 2384 2385 return 0; 2386 2387 err_config: 2388 hclgevf_misc_irq_uninit(hdev); 2389 err_misc_irq_init: 2390 hclgevf_state_uninit(hdev); 2391 hclgevf_uninit_msi(hdev); 2392 err_cmd_init: 2393 hclgevf_cmd_uninit(hdev); 2394 err_cmd_queue_init: 2395 hclgevf_pci_uninit(hdev); 2396 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2397 return ret; 2398 } 2399 2400 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 2401 { 2402 hclgevf_state_uninit(hdev); 2403 2404 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2405 hclgevf_misc_irq_uninit(hdev); 2406 hclgevf_uninit_msi(hdev); 2407 } 2408 2409 hclgevf_pci_uninit(hdev); 2410 hclgevf_cmd_uninit(hdev); 2411 } 2412 2413 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 2414 { 2415 struct pci_dev *pdev = ae_dev->pdev; 2416 struct hclgevf_dev *hdev; 2417 int ret; 2418 2419 ret = hclgevf_alloc_hdev(ae_dev); 2420 if (ret) { 2421 dev_err(&pdev->dev, "hclge device allocation failed\n"); 2422 return ret; 2423 } 2424 2425 ret = hclgevf_init_hdev(ae_dev->priv); 2426 if (ret) { 2427 dev_err(&pdev->dev, "hclge device initialization failed\n"); 2428 return ret; 2429 } 2430 2431 hdev = ae_dev->priv; 2432 timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0); 2433 INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task); 2434 2435 return 0; 2436 } 2437 2438 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 2439 { 2440 struct hclgevf_dev *hdev = ae_dev->priv; 2441 2442 hclgevf_uninit_hdev(hdev); 2443 ae_dev->priv = NULL; 2444 } 2445 2446 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 2447 { 2448 struct hnae3_handle *nic = &hdev->nic; 2449 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 2450 2451 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 2452 } 2453 2454 /** 2455 * hclgevf_get_channels - Get the current channels enabled and max supported. 2456 * @handle: hardware information for network interface 2457 * @ch: ethtool channels structure 2458 * 2459 * We don't support separate tx and rx queues as channels. The other count 2460 * represents how many queues are being used for control. max_combined counts 2461 * how many queue pairs we can support. They may not be mapped 1 to 1 with 2462 * q_vectors since we support a lot more queue pairs than q_vectors. 2463 **/ 2464 static void hclgevf_get_channels(struct hnae3_handle *handle, 2465 struct ethtool_channels *ch) 2466 { 2467 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2468 2469 ch->max_combined = hclgevf_get_max_channels(hdev); 2470 ch->other_count = 0; 2471 ch->max_other = 0; 2472 ch->combined_count = hdev->num_tqps; 2473 } 2474 2475 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 2476 u16 *alloc_tqps, u16 *max_rss_size) 2477 { 2478 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2479 2480 *alloc_tqps = hdev->num_tqps; 2481 *max_rss_size = hdev->rss_size_max; 2482 } 2483 2484 static int hclgevf_get_status(struct hnae3_handle *handle) 2485 { 2486 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2487 2488 return hdev->hw.mac.link; 2489 } 2490 2491 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 2492 u8 *auto_neg, u32 *speed, 2493 u8 *duplex) 2494 { 2495 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2496 2497 if (speed) 2498 *speed = hdev->hw.mac.speed; 2499 if (duplex) 2500 *duplex = hdev->hw.mac.duplex; 2501 if (auto_neg) 2502 *auto_neg = AUTONEG_DISABLE; 2503 } 2504 2505 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 2506 u8 duplex) 2507 { 2508 hdev->hw.mac.speed = speed; 2509 hdev->hw.mac.duplex = duplex; 2510 } 2511 2512 static int hclgevf_gro_en(struct hnae3_handle *handle, int enable) 2513 { 2514 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2515 2516 return hclgevf_config_gro(hdev, enable); 2517 } 2518 2519 static void hclgevf_get_media_type(struct hnae3_handle *handle, 2520 u8 *media_type) 2521 { 2522 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2523 if (media_type) 2524 *media_type = hdev->hw.mac.media_type; 2525 } 2526 2527 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 2528 { 2529 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2530 2531 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2532 } 2533 2534 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 2535 { 2536 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2537 2538 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2539 } 2540 2541 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 2542 { 2543 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2544 2545 return hdev->reset_count; 2546 } 2547 2548 #define MAX_SEPARATE_NUM 4 2549 #define SEPARATOR_VALUE 0xFFFFFFFF 2550 #define REG_NUM_PER_LINE 4 2551 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 2552 2553 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 2554 { 2555 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 2556 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2557 2558 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 2559 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 2560 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 2561 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 2562 2563 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 2564 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 2565 } 2566 2567 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 2568 void *data) 2569 { 2570 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2571 int i, j, reg_um, separator_num; 2572 u32 *reg = data; 2573 2574 *version = hdev->fw_version; 2575 2576 /* fetching per-VF registers values from VF PCIe register space */ 2577 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 2578 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2579 for (i = 0; i < reg_um; i++) 2580 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 2581 for (i = 0; i < separator_num; i++) 2582 *reg++ = SEPARATOR_VALUE; 2583 2584 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 2585 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2586 for (i = 0; i < reg_um; i++) 2587 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 2588 for (i = 0; i < separator_num; i++) 2589 *reg++ = SEPARATOR_VALUE; 2590 2591 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 2592 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2593 for (j = 0; j < hdev->num_tqps; j++) { 2594 for (i = 0; i < reg_um; i++) 2595 *reg++ = hclgevf_read_dev(&hdev->hw, 2596 ring_reg_addr_list[i] + 2597 0x200 * j); 2598 for (i = 0; i < separator_num; i++) 2599 *reg++ = SEPARATOR_VALUE; 2600 } 2601 2602 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 2603 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2604 for (j = 0; j < hdev->num_msi_used - 1; j++) { 2605 for (i = 0; i < reg_um; i++) 2606 *reg++ = hclgevf_read_dev(&hdev->hw, 2607 tqp_intr_reg_addr_list[i] + 2608 4 * j); 2609 for (i = 0; i < separator_num; i++) 2610 *reg++ = SEPARATOR_VALUE; 2611 } 2612 } 2613 2614 static const struct hnae3_ae_ops hclgevf_ops = { 2615 .init_ae_dev = hclgevf_init_ae_dev, 2616 .uninit_ae_dev = hclgevf_uninit_ae_dev, 2617 .flr_prepare = hclgevf_flr_prepare, 2618 .flr_done = hclgevf_flr_done, 2619 .init_client_instance = hclgevf_init_client_instance, 2620 .uninit_client_instance = hclgevf_uninit_client_instance, 2621 .start = hclgevf_ae_start, 2622 .stop = hclgevf_ae_stop, 2623 .client_start = hclgevf_client_start, 2624 .client_stop = hclgevf_client_stop, 2625 .map_ring_to_vector = hclgevf_map_ring_to_vector, 2626 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 2627 .get_vector = hclgevf_get_vector, 2628 .put_vector = hclgevf_put_vector, 2629 .reset_queue = hclgevf_reset_tqp, 2630 .set_promisc_mode = hclgevf_set_promisc_mode, 2631 .get_mac_addr = hclgevf_get_mac_addr, 2632 .set_mac_addr = hclgevf_set_mac_addr, 2633 .add_uc_addr = hclgevf_add_uc_addr, 2634 .rm_uc_addr = hclgevf_rm_uc_addr, 2635 .add_mc_addr = hclgevf_add_mc_addr, 2636 .rm_mc_addr = hclgevf_rm_mc_addr, 2637 .get_stats = hclgevf_get_stats, 2638 .update_stats = hclgevf_update_stats, 2639 .get_strings = hclgevf_get_strings, 2640 .get_sset_count = hclgevf_get_sset_count, 2641 .get_rss_key_size = hclgevf_get_rss_key_size, 2642 .get_rss_indir_size = hclgevf_get_rss_indir_size, 2643 .get_rss = hclgevf_get_rss, 2644 .set_rss = hclgevf_set_rss, 2645 .get_rss_tuple = hclgevf_get_rss_tuple, 2646 .set_rss_tuple = hclgevf_set_rss_tuple, 2647 .get_tc_size = hclgevf_get_tc_size, 2648 .get_fw_version = hclgevf_get_fw_version, 2649 .set_vlan_filter = hclgevf_set_vlan_filter, 2650 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 2651 .reset_event = hclgevf_reset_event, 2652 .set_default_reset_request = hclgevf_set_def_reset_request, 2653 .get_channels = hclgevf_get_channels, 2654 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 2655 .get_regs_len = hclgevf_get_regs_len, 2656 .get_regs = hclgevf_get_regs, 2657 .get_status = hclgevf_get_status, 2658 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 2659 .get_media_type = hclgevf_get_media_type, 2660 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 2661 .ae_dev_resetting = hclgevf_ae_dev_resetting, 2662 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 2663 .set_gro_en = hclgevf_gro_en, 2664 .set_mtu = hclgevf_set_mtu, 2665 .get_global_queue_id = hclgevf_get_qid_global, 2666 }; 2667 2668 static struct hnae3_ae_algo ae_algovf = { 2669 .ops = &hclgevf_ops, 2670 .pdev_id_table = ae_algovf_pci_tbl, 2671 }; 2672 2673 static int hclgevf_init(void) 2674 { 2675 pr_info("%s is initializing\n", HCLGEVF_NAME); 2676 2677 hnae3_register_ae_algo(&ae_algovf); 2678 2679 return 0; 2680 } 2681 2682 static void hclgevf_exit(void) 2683 { 2684 hnae3_unregister_ae_algo(&ae_algovf); 2685 } 2686 module_init(hclgevf_init); 2687 module_exit(hclgevf_exit); 2688 2689 MODULE_LICENSE("GPL"); 2690 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 2691 MODULE_DESCRIPTION("HCLGEVF Driver"); 2692 MODULE_VERSION(HCLGEVF_MOD_VERSION); 2693