1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 15 static struct hnae3_ae_algo ae_algovf; 16 17 static const struct pci_device_id ae_algovf_pci_tbl[] = { 18 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 19 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 20 /* required last entry */ 21 {0, } 22 }; 23 24 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 25 26 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 27 HCLGEVF_CMDQ_TX_ADDR_H_REG, 28 HCLGEVF_CMDQ_TX_DEPTH_REG, 29 HCLGEVF_CMDQ_TX_TAIL_REG, 30 HCLGEVF_CMDQ_TX_HEAD_REG, 31 HCLGEVF_CMDQ_RX_ADDR_L_REG, 32 HCLGEVF_CMDQ_RX_ADDR_H_REG, 33 HCLGEVF_CMDQ_RX_DEPTH_REG, 34 HCLGEVF_CMDQ_RX_TAIL_REG, 35 HCLGEVF_CMDQ_RX_HEAD_REG, 36 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 37 HCLGEVF_CMDQ_INTR_STS_REG, 38 HCLGEVF_CMDQ_INTR_EN_REG, 39 HCLGEVF_CMDQ_INTR_GEN_REG}; 40 41 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 42 HCLGEVF_RST_ING, 43 HCLGEVF_GRO_EN_REG}; 44 45 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 46 HCLGEVF_RING_RX_ADDR_H_REG, 47 HCLGEVF_RING_RX_BD_NUM_REG, 48 HCLGEVF_RING_RX_BD_LENGTH_REG, 49 HCLGEVF_RING_RX_MERGE_EN_REG, 50 HCLGEVF_RING_RX_TAIL_REG, 51 HCLGEVF_RING_RX_HEAD_REG, 52 HCLGEVF_RING_RX_FBD_NUM_REG, 53 HCLGEVF_RING_RX_OFFSET_REG, 54 HCLGEVF_RING_RX_FBD_OFFSET_REG, 55 HCLGEVF_RING_RX_STASH_REG, 56 HCLGEVF_RING_RX_BD_ERR_REG, 57 HCLGEVF_RING_TX_ADDR_L_REG, 58 HCLGEVF_RING_TX_ADDR_H_REG, 59 HCLGEVF_RING_TX_BD_NUM_REG, 60 HCLGEVF_RING_TX_PRIORITY_REG, 61 HCLGEVF_RING_TX_TC_REG, 62 HCLGEVF_RING_TX_MERGE_EN_REG, 63 HCLGEVF_RING_TX_TAIL_REG, 64 HCLGEVF_RING_TX_HEAD_REG, 65 HCLGEVF_RING_TX_FBD_NUM_REG, 66 HCLGEVF_RING_TX_OFFSET_REG, 67 HCLGEVF_RING_TX_EBD_NUM_REG, 68 HCLGEVF_RING_TX_EBD_OFFSET_REG, 69 HCLGEVF_RING_TX_BD_ERR_REG, 70 HCLGEVF_RING_EN_REG}; 71 72 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 73 HCLGEVF_TQP_INTR_GL0_REG, 74 HCLGEVF_TQP_INTR_GL1_REG, 75 HCLGEVF_TQP_INTR_GL2_REG, 76 HCLGEVF_TQP_INTR_RL_REG}; 77 78 static inline struct hclgevf_dev *hclgevf_ae_get_hdev( 79 struct hnae3_handle *handle) 80 { 81 return container_of(handle, struct hclgevf_dev, nic); 82 } 83 84 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 85 { 86 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 87 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 88 struct hclgevf_desc desc; 89 struct hclgevf_tqp *tqp; 90 int status; 91 int i; 92 93 for (i = 0; i < kinfo->num_tqps; i++) { 94 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 95 hclgevf_cmd_setup_basic_desc(&desc, 96 HCLGEVF_OPC_QUERY_RX_STATUS, 97 true); 98 99 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 100 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 101 if (status) { 102 dev_err(&hdev->pdev->dev, 103 "Query tqp stat fail, status = %d,queue = %d\n", 104 status, i); 105 return status; 106 } 107 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 108 le32_to_cpu(desc.data[1]); 109 110 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 111 true); 112 113 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 114 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 115 if (status) { 116 dev_err(&hdev->pdev->dev, 117 "Query tqp stat fail, status = %d,queue = %d\n", 118 status, i); 119 return status; 120 } 121 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 122 le32_to_cpu(desc.data[1]); 123 } 124 125 return 0; 126 } 127 128 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 129 { 130 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 131 struct hclgevf_tqp *tqp; 132 u64 *buff = data; 133 int i; 134 135 for (i = 0; i < kinfo->num_tqps; i++) { 136 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 137 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 138 } 139 for (i = 0; i < kinfo->num_tqps; i++) { 140 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 141 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 142 } 143 144 return buff; 145 } 146 147 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 148 { 149 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 150 151 return kinfo->num_tqps * 2; 152 } 153 154 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 155 { 156 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 157 u8 *buff = data; 158 int i = 0; 159 160 for (i = 0; i < kinfo->num_tqps; i++) { 161 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 162 struct hclgevf_tqp, q); 163 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 164 tqp->index); 165 buff += ETH_GSTRING_LEN; 166 } 167 168 for (i = 0; i < kinfo->num_tqps; i++) { 169 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 170 struct hclgevf_tqp, q); 171 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 172 tqp->index); 173 buff += ETH_GSTRING_LEN; 174 } 175 176 return buff; 177 } 178 179 static void hclgevf_update_stats(struct hnae3_handle *handle, 180 struct net_device_stats *net_stats) 181 { 182 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 183 int status; 184 185 status = hclgevf_tqps_update_stats(handle); 186 if (status) 187 dev_err(&hdev->pdev->dev, 188 "VF update of TQPS stats fail, status = %d.\n", 189 status); 190 } 191 192 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 193 { 194 if (strset == ETH_SS_TEST) 195 return -EOPNOTSUPP; 196 else if (strset == ETH_SS_STATS) 197 return hclgevf_tqps_get_sset_count(handle, strset); 198 199 return 0; 200 } 201 202 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 203 u8 *data) 204 { 205 u8 *p = (char *)data; 206 207 if (strset == ETH_SS_STATS) 208 p = hclgevf_tqps_get_strings(handle, p); 209 } 210 211 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 212 { 213 hclgevf_tqps_get_stats(handle, data); 214 } 215 216 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 217 { 218 u8 resp_msg; 219 int status; 220 221 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 222 true, &resp_msg, sizeof(u8)); 223 if (status) { 224 dev_err(&hdev->pdev->dev, 225 "VF request to get TC info from PF failed %d", 226 status); 227 return status; 228 } 229 230 hdev->hw_tc_map = resp_msg; 231 232 return 0; 233 } 234 235 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 236 { 237 #define HCLGEVF_TQPS_RSS_INFO_LEN 8 238 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 239 int status; 240 241 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 242 true, resp_msg, 243 HCLGEVF_TQPS_RSS_INFO_LEN); 244 if (status) { 245 dev_err(&hdev->pdev->dev, 246 "VF request to get tqp info from PF failed %d", 247 status); 248 return status; 249 } 250 251 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 252 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 253 memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); 254 memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); 255 256 return 0; 257 } 258 259 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 260 { 261 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 262 u8 msg_data[2], resp_data[2]; 263 u16 qid_in_pf = 0; 264 int ret; 265 266 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 267 268 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, 269 2, true, resp_data, 2); 270 if (!ret) 271 qid_in_pf = *(u16 *)resp_data; 272 273 return qid_in_pf; 274 } 275 276 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 277 { 278 struct hclgevf_tqp *tqp; 279 int i; 280 281 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 282 sizeof(struct hclgevf_tqp), GFP_KERNEL); 283 if (!hdev->htqp) 284 return -ENOMEM; 285 286 tqp = hdev->htqp; 287 288 for (i = 0; i < hdev->num_tqps; i++) { 289 tqp->dev = &hdev->pdev->dev; 290 tqp->index = i; 291 292 tqp->q.ae_algo = &ae_algovf; 293 tqp->q.buf_size = hdev->rx_buf_len; 294 tqp->q.desc_num = hdev->num_desc; 295 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 296 i * HCLGEVF_TQP_REG_SIZE; 297 298 tqp++; 299 } 300 301 return 0; 302 } 303 304 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 305 { 306 struct hnae3_handle *nic = &hdev->nic; 307 struct hnae3_knic_private_info *kinfo; 308 u16 new_tqps = hdev->num_tqps; 309 int i; 310 311 kinfo = &nic->kinfo; 312 kinfo->num_tc = 0; 313 kinfo->num_desc = hdev->num_desc; 314 kinfo->rx_buf_len = hdev->rx_buf_len; 315 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 316 if (hdev->hw_tc_map & BIT(i)) 317 kinfo->num_tc++; 318 319 kinfo->rss_size 320 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 321 new_tqps = kinfo->rss_size * kinfo->num_tc; 322 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 323 324 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 325 sizeof(struct hnae3_queue *), GFP_KERNEL); 326 if (!kinfo->tqp) 327 return -ENOMEM; 328 329 for (i = 0; i < kinfo->num_tqps; i++) { 330 hdev->htqp[i].q.handle = &hdev->nic; 331 hdev->htqp[i].q.tqp_index = i; 332 kinfo->tqp[i] = &hdev->htqp[i].q; 333 } 334 335 return 0; 336 } 337 338 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 339 { 340 int status; 341 u8 resp_msg; 342 343 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 344 0, false, &resp_msg, sizeof(u8)); 345 if (status) 346 dev_err(&hdev->pdev->dev, 347 "VF failed to fetch link status(%d) from PF", status); 348 } 349 350 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 351 { 352 struct hnae3_handle *rhandle = &hdev->roce; 353 struct hnae3_handle *handle = &hdev->nic; 354 struct hnae3_client *rclient; 355 struct hnae3_client *client; 356 357 client = handle->client; 358 rclient = hdev->roce_client; 359 360 link_state = 361 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 362 363 if (link_state != hdev->hw.mac.link) { 364 client->ops->link_status_change(handle, !!link_state); 365 if (rclient && rclient->ops->link_status_change) 366 rclient->ops->link_status_change(rhandle, !!link_state); 367 hdev->hw.mac.link = link_state; 368 } 369 } 370 371 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 372 { 373 struct hnae3_handle *nic = &hdev->nic; 374 int ret; 375 376 nic->ae_algo = &ae_algovf; 377 nic->pdev = hdev->pdev; 378 nic->numa_node_mask = hdev->numa_node_mask; 379 nic->flags |= HNAE3_SUPPORT_VF; 380 381 if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { 382 dev_err(&hdev->pdev->dev, "unsupported device type %d\n", 383 hdev->ae_dev->dev_type); 384 return -EINVAL; 385 } 386 387 ret = hclgevf_knic_setup(hdev); 388 if (ret) 389 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 390 ret); 391 return ret; 392 } 393 394 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 395 { 396 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 397 dev_warn(&hdev->pdev->dev, 398 "vector(vector_id %d) has been freed.\n", vector_id); 399 return; 400 } 401 402 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 403 hdev->num_msi_left += 1; 404 hdev->num_msi_used -= 1; 405 } 406 407 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 408 struct hnae3_vector_info *vector_info) 409 { 410 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 411 struct hnae3_vector_info *vector = vector_info; 412 int alloc = 0; 413 int i, j; 414 415 vector_num = min(hdev->num_msi_left, vector_num); 416 417 for (j = 0; j < vector_num; j++) { 418 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 419 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 420 vector->vector = pci_irq_vector(hdev->pdev, i); 421 vector->io_addr = hdev->hw.io_base + 422 HCLGEVF_VECTOR_REG_BASE + 423 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 424 hdev->vector_status[i] = 0; 425 hdev->vector_irq[i] = vector->vector; 426 427 vector++; 428 alloc++; 429 430 break; 431 } 432 } 433 } 434 hdev->num_msi_left -= alloc; 435 hdev->num_msi_used += alloc; 436 437 return alloc; 438 } 439 440 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 441 { 442 int i; 443 444 for (i = 0; i < hdev->num_msi; i++) 445 if (vector == hdev->vector_irq[i]) 446 return i; 447 448 return -EINVAL; 449 } 450 451 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 452 const u8 hfunc, const u8 *key) 453 { 454 struct hclgevf_rss_config_cmd *req; 455 struct hclgevf_desc desc; 456 int key_offset; 457 int key_size; 458 int ret; 459 460 req = (struct hclgevf_rss_config_cmd *)desc.data; 461 462 for (key_offset = 0; key_offset < 3; key_offset++) { 463 hclgevf_cmd_setup_basic_desc(&desc, 464 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 465 false); 466 467 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 468 req->hash_config |= 469 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 470 471 if (key_offset == 2) 472 key_size = 473 HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; 474 else 475 key_size = HCLGEVF_RSS_HASH_KEY_NUM; 476 477 memcpy(req->hash_key, 478 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 479 480 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 481 if (ret) { 482 dev_err(&hdev->pdev->dev, 483 "Configure RSS config fail, status = %d\n", 484 ret); 485 return ret; 486 } 487 } 488 489 return 0; 490 } 491 492 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 493 { 494 return HCLGEVF_RSS_KEY_SIZE; 495 } 496 497 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 498 { 499 return HCLGEVF_RSS_IND_TBL_SIZE; 500 } 501 502 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 503 { 504 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 505 struct hclgevf_rss_indirection_table_cmd *req; 506 struct hclgevf_desc desc; 507 int status; 508 int i, j; 509 510 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 511 512 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 513 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 514 false); 515 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 516 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 517 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 518 req->rss_result[j] = 519 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 520 521 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 522 if (status) { 523 dev_err(&hdev->pdev->dev, 524 "VF failed(=%d) to set RSS indirection table\n", 525 status); 526 return status; 527 } 528 } 529 530 return 0; 531 } 532 533 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 534 { 535 struct hclgevf_rss_tc_mode_cmd *req; 536 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 537 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 538 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 539 struct hclgevf_desc desc; 540 u16 roundup_size; 541 int status; 542 int i; 543 544 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 545 546 roundup_size = roundup_pow_of_two(rss_size); 547 roundup_size = ilog2(roundup_size); 548 549 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 550 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 551 tc_size[i] = roundup_size; 552 tc_offset[i] = rss_size * i; 553 } 554 555 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 556 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 557 hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 558 (tc_valid[i] & 0x1)); 559 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 560 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 561 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 562 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 563 } 564 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 565 if (status) 566 dev_err(&hdev->pdev->dev, 567 "VF failed(=%d) to set rss tc mode\n", status); 568 569 return status; 570 } 571 572 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 573 u8 *hfunc) 574 { 575 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 576 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 577 int i; 578 579 if (handle->pdev->revision >= 0x21) { 580 /* Get hash algorithm */ 581 if (hfunc) { 582 switch (rss_cfg->hash_algo) { 583 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 584 *hfunc = ETH_RSS_HASH_TOP; 585 break; 586 case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 587 *hfunc = ETH_RSS_HASH_XOR; 588 break; 589 default: 590 *hfunc = ETH_RSS_HASH_UNKNOWN; 591 break; 592 } 593 } 594 595 /* Get the RSS Key required by the user */ 596 if (key) 597 memcpy(key, rss_cfg->rss_hash_key, 598 HCLGEVF_RSS_KEY_SIZE); 599 } 600 601 if (indir) 602 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 603 indir[i] = rss_cfg->rss_indirection_tbl[i]; 604 605 return 0; 606 } 607 608 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 609 const u8 *key, const u8 hfunc) 610 { 611 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 612 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 613 int ret, i; 614 615 if (handle->pdev->revision >= 0x21) { 616 /* Set the RSS Hash Key if specififed by the user */ 617 if (key) { 618 switch (hfunc) { 619 case ETH_RSS_HASH_TOP: 620 rss_cfg->hash_algo = 621 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 622 break; 623 case ETH_RSS_HASH_XOR: 624 rss_cfg->hash_algo = 625 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 626 break; 627 case ETH_RSS_HASH_NO_CHANGE: 628 break; 629 default: 630 return -EINVAL; 631 } 632 633 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 634 key); 635 if (ret) 636 return ret; 637 638 /* Update the shadow RSS key with user specified qids */ 639 memcpy(rss_cfg->rss_hash_key, key, 640 HCLGEVF_RSS_KEY_SIZE); 641 } 642 } 643 644 /* update the shadow RSS table with user specified qids */ 645 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 646 rss_cfg->rss_indirection_tbl[i] = indir[i]; 647 648 /* update the hardware */ 649 return hclgevf_set_rss_indir_table(hdev); 650 } 651 652 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 653 { 654 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 655 656 if (nfc->data & RXH_L4_B_2_3) 657 hash_sets |= HCLGEVF_D_PORT_BIT; 658 else 659 hash_sets &= ~HCLGEVF_D_PORT_BIT; 660 661 if (nfc->data & RXH_IP_SRC) 662 hash_sets |= HCLGEVF_S_IP_BIT; 663 else 664 hash_sets &= ~HCLGEVF_S_IP_BIT; 665 666 if (nfc->data & RXH_IP_DST) 667 hash_sets |= HCLGEVF_D_IP_BIT; 668 else 669 hash_sets &= ~HCLGEVF_D_IP_BIT; 670 671 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 672 hash_sets |= HCLGEVF_V_TAG_BIT; 673 674 return hash_sets; 675 } 676 677 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 678 struct ethtool_rxnfc *nfc) 679 { 680 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 681 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 682 struct hclgevf_rss_input_tuple_cmd *req; 683 struct hclgevf_desc desc; 684 u8 tuple_sets; 685 int ret; 686 687 if (handle->pdev->revision == 0x20) 688 return -EOPNOTSUPP; 689 690 if (nfc->data & 691 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 692 return -EINVAL; 693 694 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 695 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 696 697 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 698 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 699 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 700 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 701 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 702 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 703 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 704 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 705 706 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 707 switch (nfc->flow_type) { 708 case TCP_V4_FLOW: 709 req->ipv4_tcp_en = tuple_sets; 710 break; 711 case TCP_V6_FLOW: 712 req->ipv6_tcp_en = tuple_sets; 713 break; 714 case UDP_V4_FLOW: 715 req->ipv4_udp_en = tuple_sets; 716 break; 717 case UDP_V6_FLOW: 718 req->ipv6_udp_en = tuple_sets; 719 break; 720 case SCTP_V4_FLOW: 721 req->ipv4_sctp_en = tuple_sets; 722 break; 723 case SCTP_V6_FLOW: 724 if ((nfc->data & RXH_L4_B_0_1) || 725 (nfc->data & RXH_L4_B_2_3)) 726 return -EINVAL; 727 728 req->ipv6_sctp_en = tuple_sets; 729 break; 730 case IPV4_FLOW: 731 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 732 break; 733 case IPV6_FLOW: 734 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 735 break; 736 default: 737 return -EINVAL; 738 } 739 740 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 741 if (ret) { 742 dev_err(&hdev->pdev->dev, 743 "Set rss tuple fail, status = %d\n", ret); 744 return ret; 745 } 746 747 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 748 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 749 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 750 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 751 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 752 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 753 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 754 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 755 return 0; 756 } 757 758 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 759 struct ethtool_rxnfc *nfc) 760 { 761 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 762 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 763 u8 tuple_sets; 764 765 if (handle->pdev->revision == 0x20) 766 return -EOPNOTSUPP; 767 768 nfc->data = 0; 769 770 switch (nfc->flow_type) { 771 case TCP_V4_FLOW: 772 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 773 break; 774 case UDP_V4_FLOW: 775 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 776 break; 777 case TCP_V6_FLOW: 778 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 779 break; 780 case UDP_V6_FLOW: 781 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 782 break; 783 case SCTP_V4_FLOW: 784 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 785 break; 786 case SCTP_V6_FLOW: 787 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 788 break; 789 case IPV4_FLOW: 790 case IPV6_FLOW: 791 tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 792 break; 793 default: 794 return -EINVAL; 795 } 796 797 if (!tuple_sets) 798 return 0; 799 800 if (tuple_sets & HCLGEVF_D_PORT_BIT) 801 nfc->data |= RXH_L4_B_2_3; 802 if (tuple_sets & HCLGEVF_S_PORT_BIT) 803 nfc->data |= RXH_L4_B_0_1; 804 if (tuple_sets & HCLGEVF_D_IP_BIT) 805 nfc->data |= RXH_IP_DST; 806 if (tuple_sets & HCLGEVF_S_IP_BIT) 807 nfc->data |= RXH_IP_SRC; 808 809 return 0; 810 } 811 812 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 813 struct hclgevf_rss_cfg *rss_cfg) 814 { 815 struct hclgevf_rss_input_tuple_cmd *req; 816 struct hclgevf_desc desc; 817 int ret; 818 819 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 820 821 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 822 823 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 824 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 825 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 826 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 827 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 828 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 829 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 830 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 831 832 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 833 if (ret) 834 dev_err(&hdev->pdev->dev, 835 "Configure rss input fail, status = %d\n", ret); 836 return ret; 837 } 838 839 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 840 { 841 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 842 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 843 844 return rss_cfg->rss_size; 845 } 846 847 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 848 int vector_id, 849 struct hnae3_ring_chain_node *ring_chain) 850 { 851 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 852 struct hnae3_ring_chain_node *node; 853 struct hclge_mbx_vf_to_pf_cmd *req; 854 struct hclgevf_desc desc; 855 int i = 0; 856 int status; 857 u8 type; 858 859 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 860 861 for (node = ring_chain; node; node = node->next) { 862 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 863 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 864 865 if (i == 0) { 866 hclgevf_cmd_setup_basic_desc(&desc, 867 HCLGEVF_OPC_MBX_VF_TO_PF, 868 false); 869 type = en ? 870 HCLGE_MBX_MAP_RING_TO_VECTOR : 871 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 872 req->msg[0] = type; 873 req->msg[1] = vector_id; 874 } 875 876 req->msg[idx_offset] = 877 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 878 req->msg[idx_offset + 1] = node->tqp_index; 879 req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, 880 HNAE3_RING_GL_IDX_M, 881 HNAE3_RING_GL_IDX_S); 882 883 i++; 884 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 885 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 886 HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 887 !node->next) { 888 req->msg[2] = i; 889 890 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 891 if (status) { 892 dev_err(&hdev->pdev->dev, 893 "Map TQP fail, status is %d.\n", 894 status); 895 return status; 896 } 897 i = 0; 898 hclgevf_cmd_setup_basic_desc(&desc, 899 HCLGEVF_OPC_MBX_VF_TO_PF, 900 false); 901 req->msg[0] = type; 902 req->msg[1] = vector_id; 903 } 904 } 905 906 return 0; 907 } 908 909 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 910 struct hnae3_ring_chain_node *ring_chain) 911 { 912 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 913 int vector_id; 914 915 vector_id = hclgevf_get_vector_index(hdev, vector); 916 if (vector_id < 0) { 917 dev_err(&handle->pdev->dev, 918 "Get vector index fail. ret =%d\n", vector_id); 919 return vector_id; 920 } 921 922 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 923 } 924 925 static int hclgevf_unmap_ring_from_vector( 926 struct hnae3_handle *handle, 927 int vector, 928 struct hnae3_ring_chain_node *ring_chain) 929 { 930 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 931 int ret, vector_id; 932 933 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 934 return 0; 935 936 vector_id = hclgevf_get_vector_index(hdev, vector); 937 if (vector_id < 0) { 938 dev_err(&handle->pdev->dev, 939 "Get vector index fail. ret =%d\n", vector_id); 940 return vector_id; 941 } 942 943 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 944 if (ret) 945 dev_err(&handle->pdev->dev, 946 "Unmap ring from vector fail. vector=%d, ret =%d\n", 947 vector_id, 948 ret); 949 950 return ret; 951 } 952 953 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 954 { 955 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 956 int vector_id; 957 958 vector_id = hclgevf_get_vector_index(hdev, vector); 959 if (vector_id < 0) { 960 dev_err(&handle->pdev->dev, 961 "hclgevf_put_vector get vector index fail. ret =%d\n", 962 vector_id); 963 return vector_id; 964 } 965 966 hclgevf_free_vector(hdev, vector_id); 967 968 return 0; 969 } 970 971 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 972 bool en_uc_pmc, bool en_mc_pmc) 973 { 974 struct hclge_mbx_vf_to_pf_cmd *req; 975 struct hclgevf_desc desc; 976 int status; 977 978 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 979 980 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); 981 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; 982 req->msg[1] = en_uc_pmc ? 1 : 0; 983 req->msg[2] = en_mc_pmc ? 1 : 0; 984 985 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 986 if (status) 987 dev_err(&hdev->pdev->dev, 988 "Set promisc mode fail, status is %d.\n", status); 989 990 return status; 991 } 992 993 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, 994 bool en_uc_pmc, bool en_mc_pmc) 995 { 996 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 997 998 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); 999 } 1000 1001 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, 1002 int stream_id, bool enable) 1003 { 1004 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1005 struct hclgevf_desc desc; 1006 int status; 1007 1008 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1009 1010 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1011 false); 1012 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1013 req->stream_id = cpu_to_le16(stream_id); 1014 req->enable |= enable << HCLGEVF_TQP_ENABLE_B; 1015 1016 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1017 if (status) 1018 dev_err(&hdev->pdev->dev, 1019 "TQP enable fail, status =%d.\n", status); 1020 1021 return status; 1022 } 1023 1024 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1025 { 1026 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1027 struct hclgevf_tqp *tqp; 1028 int i; 1029 1030 for (i = 0; i < kinfo->num_tqps; i++) { 1031 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1032 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1033 } 1034 } 1035 1036 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1037 { 1038 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1039 1040 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1041 } 1042 1043 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1044 bool is_first) 1045 { 1046 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1047 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1048 u8 *new_mac_addr = (u8 *)p; 1049 u8 msg_data[ETH_ALEN * 2]; 1050 u16 subcode; 1051 int status; 1052 1053 ether_addr_copy(msg_data, new_mac_addr); 1054 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); 1055 1056 subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD : 1057 HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1058 1059 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1060 subcode, msg_data, ETH_ALEN * 2, 1061 true, NULL, 0); 1062 if (!status) 1063 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1064 1065 return status; 1066 } 1067 1068 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1069 const unsigned char *addr) 1070 { 1071 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1072 1073 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1074 HCLGE_MBX_MAC_VLAN_UC_ADD, 1075 addr, ETH_ALEN, false, NULL, 0); 1076 } 1077 1078 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1079 const unsigned char *addr) 1080 { 1081 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1082 1083 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1084 HCLGE_MBX_MAC_VLAN_UC_REMOVE, 1085 addr, ETH_ALEN, false, NULL, 0); 1086 } 1087 1088 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1089 const unsigned char *addr) 1090 { 1091 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1092 1093 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1094 HCLGE_MBX_MAC_VLAN_MC_ADD, 1095 addr, ETH_ALEN, false, NULL, 0); 1096 } 1097 1098 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1099 const unsigned char *addr) 1100 { 1101 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1102 1103 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1104 HCLGE_MBX_MAC_VLAN_MC_REMOVE, 1105 addr, ETH_ALEN, false, NULL, 0); 1106 } 1107 1108 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1109 __be16 proto, u16 vlan_id, 1110 bool is_kill) 1111 { 1112 #define HCLGEVF_VLAN_MBX_MSG_LEN 5 1113 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1114 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; 1115 1116 if (vlan_id > 4095) 1117 return -EINVAL; 1118 1119 if (proto != htons(ETH_P_8021Q)) 1120 return -EPROTONOSUPPORT; 1121 1122 msg_data[0] = is_kill; 1123 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 1124 memcpy(&msg_data[3], &proto, sizeof(proto)); 1125 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1126 HCLGE_MBX_VLAN_FILTER, msg_data, 1127 HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); 1128 } 1129 1130 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1131 { 1132 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1133 u8 msg_data; 1134 1135 msg_data = enable ? 1 : 0; 1136 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1137 HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, 1138 1, false, NULL, 0); 1139 } 1140 1141 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1142 { 1143 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1144 u8 msg_data[2]; 1145 int ret; 1146 1147 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 1148 1149 /* disable vf queue before send queue reset msg to PF */ 1150 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 1151 if (ret) 1152 return ret; 1153 1154 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 1155 2, true, NULL, 0); 1156 } 1157 1158 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1159 { 1160 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1161 1162 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu, 1163 sizeof(new_mtu), true, NULL, 0); 1164 } 1165 1166 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1167 enum hnae3_reset_notify_type type) 1168 { 1169 struct hnae3_client *client = hdev->nic_client; 1170 struct hnae3_handle *handle = &hdev->nic; 1171 int ret; 1172 1173 if (!client->ops->reset_notify) 1174 return -EOPNOTSUPP; 1175 1176 ret = client->ops->reset_notify(handle, type); 1177 if (ret) 1178 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1179 type, ret); 1180 1181 return ret; 1182 } 1183 1184 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 1185 { 1186 struct hclgevf_dev *hdev = ae_dev->priv; 1187 1188 set_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1189 } 1190 1191 static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev, 1192 unsigned long delay_us, 1193 unsigned long wait_cnt) 1194 { 1195 unsigned long cnt = 0; 1196 1197 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && 1198 cnt++ < wait_cnt) 1199 usleep_range(delay_us, delay_us * 2); 1200 1201 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { 1202 dev_err(&hdev->pdev->dev, 1203 "flr wait timeout\n"); 1204 return -ETIMEDOUT; 1205 } 1206 1207 return 0; 1208 } 1209 1210 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1211 { 1212 #define HCLGEVF_RESET_WAIT_US 20000 1213 #define HCLGEVF_RESET_WAIT_CNT 2000 1214 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1215 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1216 1217 u32 val; 1218 int ret; 1219 1220 /* wait to check the hardware reset completion status */ 1221 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1222 dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val); 1223 1224 if (hdev->reset_type == HNAE3_FLR_RESET) 1225 return hclgevf_flr_poll_timeout(hdev, 1226 HCLGEVF_RESET_WAIT_US, 1227 HCLGEVF_RESET_WAIT_CNT); 1228 1229 ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val, 1230 !(val & HCLGEVF_RST_ING_BITS), 1231 HCLGEVF_RESET_WAIT_US, 1232 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1233 1234 /* hardware completion status should be available by this time */ 1235 if (ret) { 1236 dev_err(&hdev->pdev->dev, 1237 "could'nt get reset done status from h/w, timeout!\n"); 1238 return ret; 1239 } 1240 1241 /* we will wait a bit more to let reset of the stack to complete. This 1242 * might happen in case reset assertion was made by PF. Yes, this also 1243 * means we might end up waiting bit more even for VF reset. 1244 */ 1245 msleep(5000); 1246 1247 return 0; 1248 } 1249 1250 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1251 { 1252 int ret; 1253 1254 /* uninitialize the nic client */ 1255 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1256 if (ret) 1257 return ret; 1258 1259 /* re-initialize the hclge device */ 1260 ret = hclgevf_reset_hdev(hdev); 1261 if (ret) { 1262 dev_err(&hdev->pdev->dev, 1263 "hclge device re-init failed, VF is disabled!\n"); 1264 return ret; 1265 } 1266 1267 /* bring up the nic client again */ 1268 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1269 if (ret) 1270 return ret; 1271 1272 return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); 1273 } 1274 1275 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1276 { 1277 int ret = 0; 1278 1279 switch (hdev->reset_type) { 1280 case HNAE3_VF_FUNC_RESET: 1281 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 1282 0, true, NULL, sizeof(u8)); 1283 break; 1284 case HNAE3_FLR_RESET: 1285 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1286 break; 1287 default: 1288 break; 1289 } 1290 1291 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1292 1293 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", 1294 hdev->reset_type, ret); 1295 1296 return ret; 1297 } 1298 1299 static int hclgevf_reset(struct hclgevf_dev *hdev) 1300 { 1301 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1302 int ret; 1303 1304 /* Initialize ae_dev reset status as well, in case enet layer wants to 1305 * know if device is undergoing reset 1306 */ 1307 ae_dev->reset_type = hdev->reset_type; 1308 hdev->reset_count++; 1309 rtnl_lock(); 1310 1311 /* bring down the nic to stop any ongoing TX/RX */ 1312 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1313 if (ret) 1314 goto err_reset_lock; 1315 1316 rtnl_unlock(); 1317 1318 ret = hclgevf_reset_prepare_wait(hdev); 1319 if (ret) 1320 goto err_reset; 1321 1322 /* check if VF could successfully fetch the hardware reset completion 1323 * status from the hardware 1324 */ 1325 ret = hclgevf_reset_wait(hdev); 1326 if (ret) { 1327 /* can't do much in this situation, will disable VF */ 1328 dev_err(&hdev->pdev->dev, 1329 "VF failed(=%d) to fetch H/W reset completion status\n", 1330 ret); 1331 goto err_reset; 1332 } 1333 1334 rtnl_lock(); 1335 1336 /* now, re-initialize the nic client and ae device*/ 1337 ret = hclgevf_reset_stack(hdev); 1338 if (ret) { 1339 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1340 goto err_reset_lock; 1341 } 1342 1343 /* bring up the nic to enable TX/RX again */ 1344 ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1345 if (ret) 1346 goto err_reset_lock; 1347 1348 rtnl_unlock(); 1349 1350 hdev->last_reset_time = jiffies; 1351 ae_dev->reset_type = HNAE3_NONE_RESET; 1352 1353 return ret; 1354 err_reset_lock: 1355 rtnl_unlock(); 1356 err_reset: 1357 /* When VF reset failed, only the higher level reset asserted by PF 1358 * can restore it, so re-initialize the command queue to receive 1359 * this higher reset event. 1360 */ 1361 hclgevf_cmd_init(hdev); 1362 dev_err(&hdev->pdev->dev, "failed to reset VF\n"); 1363 1364 return ret; 1365 } 1366 1367 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1368 unsigned long *addr) 1369 { 1370 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1371 1372 /* return the highest priority reset level amongst all */ 1373 if (test_bit(HNAE3_VF_RESET, addr)) { 1374 rst_level = HNAE3_VF_RESET; 1375 clear_bit(HNAE3_VF_RESET, addr); 1376 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1377 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1378 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1379 rst_level = HNAE3_VF_FULL_RESET; 1380 clear_bit(HNAE3_VF_FULL_RESET, addr); 1381 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1382 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1383 rst_level = HNAE3_VF_PF_FUNC_RESET; 1384 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1385 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1386 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1387 rst_level = HNAE3_VF_FUNC_RESET; 1388 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1389 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 1390 rst_level = HNAE3_FLR_RESET; 1391 clear_bit(HNAE3_FLR_RESET, addr); 1392 } 1393 1394 return rst_level; 1395 } 1396 1397 static void hclgevf_reset_event(struct pci_dev *pdev, 1398 struct hnae3_handle *handle) 1399 { 1400 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1401 struct hclgevf_dev *hdev = ae_dev->priv; 1402 1403 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 1404 1405 if (hdev->default_reset_request) 1406 hdev->reset_level = 1407 hclgevf_get_reset_level(hdev, 1408 &hdev->default_reset_request); 1409 else 1410 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1411 1412 /* reset of this VF requested */ 1413 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1414 hclgevf_reset_task_schedule(hdev); 1415 1416 hdev->last_reset_time = jiffies; 1417 } 1418 1419 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1420 enum hnae3_reset_type rst_type) 1421 { 1422 struct hclgevf_dev *hdev = ae_dev->priv; 1423 1424 set_bit(rst_type, &hdev->default_reset_request); 1425 } 1426 1427 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 1428 { 1429 #define HCLGEVF_FLR_WAIT_MS 100 1430 #define HCLGEVF_FLR_WAIT_CNT 50 1431 struct hclgevf_dev *hdev = ae_dev->priv; 1432 int cnt = 0; 1433 1434 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1435 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1436 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); 1437 hclgevf_reset_event(hdev->pdev, NULL); 1438 1439 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && 1440 cnt++ < HCLGEVF_FLR_WAIT_CNT) 1441 msleep(HCLGEVF_FLR_WAIT_MS); 1442 1443 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) 1444 dev_err(&hdev->pdev->dev, 1445 "flr wait down timeout: %d\n", cnt); 1446 } 1447 1448 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1449 { 1450 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1451 1452 return hdev->fw_version; 1453 } 1454 1455 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1456 { 1457 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1458 1459 vector->vector_irq = pci_irq_vector(hdev->pdev, 1460 HCLGEVF_MISC_VECTOR_NUM); 1461 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1462 /* vector status always valid for Vector 0 */ 1463 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1464 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1465 1466 hdev->num_msi_left -= 1; 1467 hdev->num_msi_used += 1; 1468 } 1469 1470 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1471 { 1472 if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && 1473 !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { 1474 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1475 schedule_work(&hdev->rst_service_task); 1476 } 1477 } 1478 1479 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1480 { 1481 if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && 1482 !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { 1483 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1484 schedule_work(&hdev->mbx_service_task); 1485 } 1486 } 1487 1488 static void hclgevf_task_schedule(struct hclgevf_dev *hdev) 1489 { 1490 if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && 1491 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) 1492 schedule_work(&hdev->service_task); 1493 } 1494 1495 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) 1496 { 1497 /* if we have any pending mailbox event then schedule the mbx task */ 1498 if (hdev->mbx_event_pending) 1499 hclgevf_mbx_task_schedule(hdev); 1500 1501 if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) 1502 hclgevf_reset_task_schedule(hdev); 1503 } 1504 1505 static void hclgevf_service_timer(struct timer_list *t) 1506 { 1507 struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); 1508 1509 mod_timer(&hdev->service_timer, jiffies + 5 * HZ); 1510 1511 hclgevf_task_schedule(hdev); 1512 } 1513 1514 static void hclgevf_reset_service_task(struct work_struct *work) 1515 { 1516 struct hclgevf_dev *hdev = 1517 container_of(work, struct hclgevf_dev, rst_service_task); 1518 int ret; 1519 1520 if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1521 return; 1522 1523 clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1524 1525 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 1526 &hdev->reset_state)) { 1527 /* PF has initmated that it is about to reset the hardware. 1528 * We now have to poll & check if harware has actually completed 1529 * the reset sequence. On hardware reset completion, VF needs to 1530 * reset the client and ae device. 1531 */ 1532 hdev->reset_attempts = 0; 1533 1534 hdev->last_reset_time = jiffies; 1535 while ((hdev->reset_type = 1536 hclgevf_get_reset_level(hdev, &hdev->reset_pending)) 1537 != HNAE3_NONE_RESET) { 1538 ret = hclgevf_reset(hdev); 1539 if (ret) 1540 dev_err(&hdev->pdev->dev, 1541 "VF stack reset failed %d.\n", ret); 1542 } 1543 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1544 &hdev->reset_state)) { 1545 /* we could be here when either of below happens: 1546 * 1. reset was initiated due to watchdog timeout due to 1547 * a. IMP was earlier reset and our TX got choked down and 1548 * which resulted in watchdog reacting and inducing VF 1549 * reset. This also means our cmdq would be unreliable. 1550 * b. problem in TX due to other lower layer(example link 1551 * layer not functioning properly etc.) 1552 * 2. VF reset might have been initiated due to some config 1553 * change. 1554 * 1555 * NOTE: Theres no clear way to detect above cases than to react 1556 * to the response of PF for this reset request. PF will ack the 1557 * 1b and 2. cases but we will not get any intimation about 1a 1558 * from PF as cmdq would be in unreliable state i.e. mailbox 1559 * communication between PF and VF would be broken. 1560 */ 1561 1562 /* if we are never geting into pending state it means either: 1563 * 1. PF is not receiving our request which could be due to IMP 1564 * reset 1565 * 2. PF is screwed 1566 * We cannot do much for 2. but to check first we can try reset 1567 * our PCIe + stack and see if it alleviates the problem. 1568 */ 1569 if (hdev->reset_attempts > 3) { 1570 /* prepare for full reset of stack + pcie interface */ 1571 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 1572 1573 /* "defer" schedule the reset task again */ 1574 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1575 } else { 1576 hdev->reset_attempts++; 1577 1578 set_bit(hdev->reset_level, &hdev->reset_pending); 1579 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1580 } 1581 hclgevf_reset_task_schedule(hdev); 1582 } 1583 1584 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1585 } 1586 1587 static void hclgevf_mailbox_service_task(struct work_struct *work) 1588 { 1589 struct hclgevf_dev *hdev; 1590 1591 hdev = container_of(work, struct hclgevf_dev, mbx_service_task); 1592 1593 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1594 return; 1595 1596 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1597 1598 hclgevf_mbx_async_handler(hdev); 1599 1600 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1601 } 1602 1603 static void hclgevf_keep_alive_timer(struct timer_list *t) 1604 { 1605 struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer); 1606 1607 schedule_work(&hdev->keep_alive_task); 1608 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 1609 } 1610 1611 static void hclgevf_keep_alive_task(struct work_struct *work) 1612 { 1613 struct hclgevf_dev *hdev; 1614 u8 respmsg; 1615 int ret; 1616 1617 hdev = container_of(work, struct hclgevf_dev, keep_alive_task); 1618 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, 1619 0, false, &respmsg, sizeof(u8)); 1620 if (ret) 1621 dev_err(&hdev->pdev->dev, 1622 "VF sends keep alive cmd failed(=%d)\n", ret); 1623 } 1624 1625 static void hclgevf_service_task(struct work_struct *work) 1626 { 1627 struct hclgevf_dev *hdev; 1628 1629 hdev = container_of(work, struct hclgevf_dev, service_task); 1630 1631 /* request the link status from the PF. PF would be able to tell VF 1632 * about such updates in future so we might remove this later 1633 */ 1634 hclgevf_request_link_info(hdev); 1635 1636 hclgevf_deferred_task_schedule(hdev); 1637 1638 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1639 } 1640 1641 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 1642 { 1643 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 1644 } 1645 1646 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 1647 u32 *clearval) 1648 { 1649 u32 cmdq_src_reg, rst_ing_reg; 1650 1651 /* fetch the events from their corresponding regs */ 1652 cmdq_src_reg = hclgevf_read_dev(&hdev->hw, 1653 HCLGEVF_VECTOR0_CMDQ_SRC_REG); 1654 1655 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) { 1656 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1657 dev_info(&hdev->pdev->dev, 1658 "receive reset interrupt 0x%x!\n", rst_ing_reg); 1659 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 1660 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1661 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1662 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); 1663 *clearval = cmdq_src_reg; 1664 return HCLGEVF_VECTOR0_EVENT_RST; 1665 } 1666 1667 /* check for vector0 mailbox(=CMDQ RX) event source */ 1668 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 1669 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 1670 *clearval = cmdq_src_reg; 1671 return HCLGEVF_VECTOR0_EVENT_MBX; 1672 } 1673 1674 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); 1675 1676 return HCLGEVF_VECTOR0_EVENT_OTHER; 1677 } 1678 1679 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1680 { 1681 writel(en ? 1 : 0, vector->addr); 1682 } 1683 1684 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 1685 { 1686 enum hclgevf_evt_cause event_cause; 1687 struct hclgevf_dev *hdev = data; 1688 u32 clearval; 1689 1690 hclgevf_enable_vector(&hdev->misc_vector, false); 1691 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 1692 1693 switch (event_cause) { 1694 case HCLGEVF_VECTOR0_EVENT_RST: 1695 hclgevf_reset_task_schedule(hdev); 1696 break; 1697 case HCLGEVF_VECTOR0_EVENT_MBX: 1698 hclgevf_mbx_handler(hdev); 1699 break; 1700 default: 1701 break; 1702 } 1703 1704 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 1705 hclgevf_clear_event_cause(hdev, clearval); 1706 hclgevf_enable_vector(&hdev->misc_vector, true); 1707 } 1708 1709 return IRQ_HANDLED; 1710 } 1711 1712 static int hclgevf_configure(struct hclgevf_dev *hdev) 1713 { 1714 int ret; 1715 1716 hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE; 1717 1718 /* get queue configuration from PF */ 1719 ret = hclgevf_get_queue_info(hdev); 1720 if (ret) 1721 return ret; 1722 /* get tc configuration from PF */ 1723 return hclgevf_get_tc_info(hdev); 1724 } 1725 1726 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 1727 { 1728 struct pci_dev *pdev = ae_dev->pdev; 1729 struct hclgevf_dev *hdev; 1730 1731 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 1732 if (!hdev) 1733 return -ENOMEM; 1734 1735 hdev->pdev = pdev; 1736 hdev->ae_dev = ae_dev; 1737 ae_dev->priv = hdev; 1738 1739 return 0; 1740 } 1741 1742 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 1743 { 1744 struct hnae3_handle *roce = &hdev->roce; 1745 struct hnae3_handle *nic = &hdev->nic; 1746 1747 roce->rinfo.num_vectors = hdev->num_roce_msix; 1748 1749 if (hdev->num_msi_left < roce->rinfo.num_vectors || 1750 hdev->num_msi_left == 0) 1751 return -EINVAL; 1752 1753 roce->rinfo.base_vector = hdev->roce_base_vector; 1754 1755 roce->rinfo.netdev = nic->kinfo.netdev; 1756 roce->rinfo.roce_io_base = hdev->hw.io_base; 1757 1758 roce->pdev = nic->pdev; 1759 roce->ae_algo = nic->ae_algo; 1760 roce->numa_node_mask = nic->numa_node_mask; 1761 1762 return 0; 1763 } 1764 1765 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 1766 { 1767 struct hclgevf_cfg_gro_status_cmd *req; 1768 struct hclgevf_desc desc; 1769 int ret; 1770 1771 if (!hnae3_dev_gro_supported(hdev)) 1772 return 0; 1773 1774 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 1775 false); 1776 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 1777 1778 req->gro_en = cpu_to_le16(en ? 1 : 0); 1779 1780 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1781 if (ret) 1782 dev_err(&hdev->pdev->dev, 1783 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 1784 1785 return ret; 1786 } 1787 1788 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 1789 { 1790 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1791 int i, ret; 1792 1793 rss_cfg->rss_size = hdev->rss_size_max; 1794 1795 if (hdev->pdev->revision >= 0x21) { 1796 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 1797 netdev_rss_key_fill(rss_cfg->rss_hash_key, 1798 HCLGEVF_RSS_KEY_SIZE); 1799 1800 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 1801 rss_cfg->rss_hash_key); 1802 if (ret) 1803 return ret; 1804 1805 rss_cfg->rss_tuple_sets.ipv4_tcp_en = 1806 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1807 rss_cfg->rss_tuple_sets.ipv4_udp_en = 1808 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1809 rss_cfg->rss_tuple_sets.ipv4_sctp_en = 1810 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1811 rss_cfg->rss_tuple_sets.ipv4_fragment_en = 1812 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1813 rss_cfg->rss_tuple_sets.ipv6_tcp_en = 1814 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1815 rss_cfg->rss_tuple_sets.ipv6_udp_en = 1816 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1817 rss_cfg->rss_tuple_sets.ipv6_sctp_en = 1818 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1819 rss_cfg->rss_tuple_sets.ipv6_fragment_en = 1820 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1821 1822 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 1823 if (ret) 1824 return ret; 1825 1826 } 1827 1828 /* Initialize RSS indirect table for each vport */ 1829 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 1830 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; 1831 1832 ret = hclgevf_set_rss_indir_table(hdev); 1833 if (ret) 1834 return ret; 1835 1836 return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); 1837 } 1838 1839 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 1840 { 1841 /* other vlan config(like, VLAN TX/RX offload) would also be added 1842 * here later 1843 */ 1844 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 1845 false); 1846 } 1847 1848 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 1849 { 1850 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1851 1852 if (enable) { 1853 mod_timer(&hdev->service_timer, jiffies + HZ); 1854 } else { 1855 del_timer_sync(&hdev->service_timer); 1856 cancel_work_sync(&hdev->service_task); 1857 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1858 } 1859 } 1860 1861 static int hclgevf_ae_start(struct hnae3_handle *handle) 1862 { 1863 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1864 1865 /* reset tqp stats */ 1866 hclgevf_reset_tqp_stats(handle); 1867 1868 hclgevf_request_link_info(hdev); 1869 1870 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1871 1872 return 0; 1873 } 1874 1875 static void hclgevf_ae_stop(struct hnae3_handle *handle) 1876 { 1877 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1878 int i; 1879 1880 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1881 1882 for (i = 0; i < handle->kinfo.num_tqps; i++) 1883 hclgevf_reset_tqp(handle, i); 1884 1885 /* reset tqp stats */ 1886 hclgevf_reset_tqp_stats(handle); 1887 hclgevf_update_link_status(hdev, 0); 1888 } 1889 1890 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 1891 { 1892 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1893 u8 msg_data; 1894 1895 msg_data = alive ? 1 : 0; 1896 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE, 1897 0, &msg_data, 1, false, NULL, 0); 1898 } 1899 1900 static int hclgevf_client_start(struct hnae3_handle *handle) 1901 { 1902 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1903 1904 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 1905 return hclgevf_set_alive(handle, true); 1906 } 1907 1908 static void hclgevf_client_stop(struct hnae3_handle *handle) 1909 { 1910 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1911 int ret; 1912 1913 ret = hclgevf_set_alive(handle, false); 1914 if (ret) 1915 dev_warn(&hdev->pdev->dev, 1916 "%s failed %d\n", __func__, ret); 1917 1918 del_timer_sync(&hdev->keep_alive_timer); 1919 cancel_work_sync(&hdev->keep_alive_task); 1920 } 1921 1922 static void hclgevf_state_init(struct hclgevf_dev *hdev) 1923 { 1924 /* setup tasks for the MBX */ 1925 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); 1926 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1927 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1928 1929 /* setup tasks for service timer */ 1930 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); 1931 1932 INIT_WORK(&hdev->service_task, hclgevf_service_task); 1933 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1934 1935 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); 1936 1937 mutex_init(&hdev->mbx_resp.mbx_mutex); 1938 1939 /* bring the device down */ 1940 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1941 } 1942 1943 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 1944 { 1945 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1946 1947 if (hdev->service_timer.function) 1948 del_timer_sync(&hdev->service_timer); 1949 if (hdev->service_task.func) 1950 cancel_work_sync(&hdev->service_task); 1951 if (hdev->mbx_service_task.func) 1952 cancel_work_sync(&hdev->mbx_service_task); 1953 if (hdev->rst_service_task.func) 1954 cancel_work_sync(&hdev->rst_service_task); 1955 1956 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 1957 } 1958 1959 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 1960 { 1961 struct pci_dev *pdev = hdev->pdev; 1962 int vectors; 1963 int i; 1964 1965 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) 1966 vectors = pci_alloc_irq_vectors(pdev, 1967 hdev->roce_base_msix_offset + 1, 1968 hdev->num_msi, 1969 PCI_IRQ_MSIX); 1970 else 1971 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 1972 PCI_IRQ_MSI | PCI_IRQ_MSIX); 1973 1974 if (vectors < 0) { 1975 dev_err(&pdev->dev, 1976 "failed(%d) to allocate MSI/MSI-X vectors\n", 1977 vectors); 1978 return vectors; 1979 } 1980 if (vectors < hdev->num_msi) 1981 dev_warn(&hdev->pdev->dev, 1982 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 1983 hdev->num_msi, vectors); 1984 1985 hdev->num_msi = vectors; 1986 hdev->num_msi_left = vectors; 1987 hdev->base_msi_vector = pdev->irq; 1988 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 1989 1990 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 1991 sizeof(u16), GFP_KERNEL); 1992 if (!hdev->vector_status) { 1993 pci_free_irq_vectors(pdev); 1994 return -ENOMEM; 1995 } 1996 1997 for (i = 0; i < hdev->num_msi; i++) 1998 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 1999 2000 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2001 sizeof(int), GFP_KERNEL); 2002 if (!hdev->vector_irq) { 2003 devm_kfree(&pdev->dev, hdev->vector_status); 2004 pci_free_irq_vectors(pdev); 2005 return -ENOMEM; 2006 } 2007 2008 return 0; 2009 } 2010 2011 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2012 { 2013 struct pci_dev *pdev = hdev->pdev; 2014 2015 devm_kfree(&pdev->dev, hdev->vector_status); 2016 devm_kfree(&pdev->dev, hdev->vector_irq); 2017 pci_free_irq_vectors(pdev); 2018 } 2019 2020 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2021 { 2022 int ret = 0; 2023 2024 hclgevf_get_misc_vector(hdev); 2025 2026 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2027 0, "hclgevf_cmd", hdev); 2028 if (ret) { 2029 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2030 hdev->misc_vector.vector_irq); 2031 return ret; 2032 } 2033 2034 hclgevf_clear_event_cause(hdev, 0); 2035 2036 /* enable misc. vector(vector 0) */ 2037 hclgevf_enable_vector(&hdev->misc_vector, true); 2038 2039 return ret; 2040 } 2041 2042 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2043 { 2044 /* disable misc vector(vector 0) */ 2045 hclgevf_enable_vector(&hdev->misc_vector, false); 2046 synchronize_irq(hdev->misc_vector.vector_irq); 2047 free_irq(hdev->misc_vector.vector_irq, hdev); 2048 hclgevf_free_vector(hdev, 0); 2049 } 2050 2051 static int hclgevf_init_client_instance(struct hnae3_client *client, 2052 struct hnae3_ae_dev *ae_dev) 2053 { 2054 struct hclgevf_dev *hdev = ae_dev->priv; 2055 int ret; 2056 2057 switch (client->type) { 2058 case HNAE3_CLIENT_KNIC: 2059 hdev->nic_client = client; 2060 hdev->nic.client = client; 2061 2062 ret = client->ops->init_instance(&hdev->nic); 2063 if (ret) 2064 goto clear_nic; 2065 2066 hnae3_set_client_init_flag(client, ae_dev, 1); 2067 2068 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { 2069 struct hnae3_client *rc = hdev->roce_client; 2070 2071 ret = hclgevf_init_roce_base_info(hdev); 2072 if (ret) 2073 goto clear_roce; 2074 ret = rc->ops->init_instance(&hdev->roce); 2075 if (ret) 2076 goto clear_roce; 2077 2078 hnae3_set_client_init_flag(hdev->roce_client, ae_dev, 2079 1); 2080 } 2081 break; 2082 case HNAE3_CLIENT_UNIC: 2083 hdev->nic_client = client; 2084 hdev->nic.client = client; 2085 2086 ret = client->ops->init_instance(&hdev->nic); 2087 if (ret) 2088 goto clear_nic; 2089 2090 hnae3_set_client_init_flag(client, ae_dev, 1); 2091 break; 2092 case HNAE3_CLIENT_ROCE: 2093 if (hnae3_dev_roce_supported(hdev)) { 2094 hdev->roce_client = client; 2095 hdev->roce.client = client; 2096 } 2097 2098 if (hdev->roce_client && hdev->nic_client) { 2099 ret = hclgevf_init_roce_base_info(hdev); 2100 if (ret) 2101 goto clear_roce; 2102 2103 ret = client->ops->init_instance(&hdev->roce); 2104 if (ret) 2105 goto clear_roce; 2106 } 2107 2108 hnae3_set_client_init_flag(client, ae_dev, 1); 2109 break; 2110 default: 2111 return -EINVAL; 2112 } 2113 2114 return 0; 2115 2116 clear_nic: 2117 hdev->nic_client = NULL; 2118 hdev->nic.client = NULL; 2119 return ret; 2120 clear_roce: 2121 hdev->roce_client = NULL; 2122 hdev->roce.client = NULL; 2123 return ret; 2124 } 2125 2126 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2127 struct hnae3_ae_dev *ae_dev) 2128 { 2129 struct hclgevf_dev *hdev = ae_dev->priv; 2130 2131 /* un-init roce, if it exists */ 2132 if (hdev->roce_client) { 2133 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2134 hdev->roce_client = NULL; 2135 hdev->roce.client = NULL; 2136 } 2137 2138 /* un-init nic/unic, if this was not called by roce client */ 2139 if (client->ops->uninit_instance && hdev->nic_client && 2140 client->type != HNAE3_CLIENT_ROCE) { 2141 client->ops->uninit_instance(&hdev->nic, 0); 2142 hdev->nic_client = NULL; 2143 hdev->nic.client = NULL; 2144 } 2145 } 2146 2147 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2148 { 2149 struct pci_dev *pdev = hdev->pdev; 2150 struct hclgevf_hw *hw; 2151 int ret; 2152 2153 ret = pci_enable_device(pdev); 2154 if (ret) { 2155 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2156 return ret; 2157 } 2158 2159 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2160 if (ret) { 2161 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2162 goto err_disable_device; 2163 } 2164 2165 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2166 if (ret) { 2167 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2168 goto err_disable_device; 2169 } 2170 2171 pci_set_master(pdev); 2172 hw = &hdev->hw; 2173 hw->hdev = hdev; 2174 hw->io_base = pci_iomap(pdev, 2, 0); 2175 if (!hw->io_base) { 2176 dev_err(&pdev->dev, "can't map configuration register space\n"); 2177 ret = -ENOMEM; 2178 goto err_clr_master; 2179 } 2180 2181 return 0; 2182 2183 err_clr_master: 2184 pci_clear_master(pdev); 2185 pci_release_regions(pdev); 2186 err_disable_device: 2187 pci_disable_device(pdev); 2188 2189 return ret; 2190 } 2191 2192 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2193 { 2194 struct pci_dev *pdev = hdev->pdev; 2195 2196 pci_iounmap(pdev, hdev->hw.io_base); 2197 pci_clear_master(pdev); 2198 pci_release_regions(pdev); 2199 pci_disable_device(pdev); 2200 } 2201 2202 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2203 { 2204 struct hclgevf_query_res_cmd *req; 2205 struct hclgevf_desc desc; 2206 int ret; 2207 2208 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 2209 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2210 if (ret) { 2211 dev_err(&hdev->pdev->dev, 2212 "query vf resource failed, ret = %d.\n", ret); 2213 return ret; 2214 } 2215 2216 req = (struct hclgevf_query_res_cmd *)desc.data; 2217 2218 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { 2219 hdev->roce_base_msix_offset = 2220 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), 2221 HCLGEVF_MSIX_OFT_ROCEE_M, 2222 HCLGEVF_MSIX_OFT_ROCEE_S); 2223 hdev->num_roce_msix = 2224 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2225 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2226 2227 /* VF should have NIC vectors and Roce vectors, NIC vectors 2228 * are queued before Roce vectors. The offset is fixed to 64. 2229 */ 2230 hdev->num_msi = hdev->num_roce_msix + 2231 hdev->roce_base_msix_offset; 2232 } else { 2233 hdev->num_msi = 2234 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2235 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2236 } 2237 2238 return 0; 2239 } 2240 2241 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2242 { 2243 struct pci_dev *pdev = hdev->pdev; 2244 int ret = 0; 2245 2246 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2247 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2248 hclgevf_misc_irq_uninit(hdev); 2249 hclgevf_uninit_msi(hdev); 2250 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2251 } 2252 2253 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2254 pci_set_master(pdev); 2255 ret = hclgevf_init_msi(hdev); 2256 if (ret) { 2257 dev_err(&pdev->dev, 2258 "failed(%d) to init MSI/MSI-X\n", ret); 2259 return ret; 2260 } 2261 2262 ret = hclgevf_misc_irq_init(hdev); 2263 if (ret) { 2264 hclgevf_uninit_msi(hdev); 2265 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2266 ret); 2267 return ret; 2268 } 2269 2270 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2271 } 2272 2273 return ret; 2274 } 2275 2276 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2277 { 2278 struct pci_dev *pdev = hdev->pdev; 2279 int ret; 2280 2281 ret = hclgevf_pci_reset(hdev); 2282 if (ret) { 2283 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2284 return ret; 2285 } 2286 2287 ret = hclgevf_cmd_init(hdev); 2288 if (ret) { 2289 dev_err(&pdev->dev, "cmd failed %d\n", ret); 2290 return ret; 2291 } 2292 2293 ret = hclgevf_rss_init_hw(hdev); 2294 if (ret) { 2295 dev_err(&hdev->pdev->dev, 2296 "failed(%d) to initialize RSS\n", ret); 2297 return ret; 2298 } 2299 2300 ret = hclgevf_config_gro(hdev, true); 2301 if (ret) 2302 return ret; 2303 2304 ret = hclgevf_init_vlan_config(hdev); 2305 if (ret) { 2306 dev_err(&hdev->pdev->dev, 2307 "failed(%d) to initialize VLAN config\n", ret); 2308 return ret; 2309 } 2310 2311 dev_info(&hdev->pdev->dev, "Reset done\n"); 2312 2313 return 0; 2314 } 2315 2316 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 2317 { 2318 struct pci_dev *pdev = hdev->pdev; 2319 int ret; 2320 2321 ret = hclgevf_pci_init(hdev); 2322 if (ret) { 2323 dev_err(&pdev->dev, "PCI initialization failed\n"); 2324 return ret; 2325 } 2326 2327 ret = hclgevf_cmd_queue_init(hdev); 2328 if (ret) { 2329 dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret); 2330 goto err_cmd_queue_init; 2331 } 2332 2333 ret = hclgevf_cmd_init(hdev); 2334 if (ret) 2335 goto err_cmd_init; 2336 2337 /* Get vf resource */ 2338 ret = hclgevf_query_vf_resource(hdev); 2339 if (ret) { 2340 dev_err(&hdev->pdev->dev, 2341 "Query vf status error, ret = %d.\n", ret); 2342 goto err_cmd_init; 2343 } 2344 2345 ret = hclgevf_init_msi(hdev); 2346 if (ret) { 2347 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 2348 goto err_cmd_init; 2349 } 2350 2351 hclgevf_state_init(hdev); 2352 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2353 2354 ret = hclgevf_misc_irq_init(hdev); 2355 if (ret) { 2356 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2357 ret); 2358 goto err_misc_irq_init; 2359 } 2360 2361 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2362 2363 ret = hclgevf_configure(hdev); 2364 if (ret) { 2365 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2366 goto err_config; 2367 } 2368 2369 ret = hclgevf_alloc_tqps(hdev); 2370 if (ret) { 2371 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2372 goto err_config; 2373 } 2374 2375 ret = hclgevf_set_handle_info(hdev); 2376 if (ret) { 2377 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); 2378 goto err_config; 2379 } 2380 2381 ret = hclgevf_config_gro(hdev, true); 2382 if (ret) 2383 goto err_config; 2384 2385 /* Initialize RSS for this VF */ 2386 ret = hclgevf_rss_init_hw(hdev); 2387 if (ret) { 2388 dev_err(&hdev->pdev->dev, 2389 "failed(%d) to initialize RSS\n", ret); 2390 goto err_config; 2391 } 2392 2393 ret = hclgevf_init_vlan_config(hdev); 2394 if (ret) { 2395 dev_err(&hdev->pdev->dev, 2396 "failed(%d) to initialize VLAN config\n", ret); 2397 goto err_config; 2398 } 2399 2400 hdev->last_reset_time = jiffies; 2401 pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); 2402 2403 return 0; 2404 2405 err_config: 2406 hclgevf_misc_irq_uninit(hdev); 2407 err_misc_irq_init: 2408 hclgevf_state_uninit(hdev); 2409 hclgevf_uninit_msi(hdev); 2410 err_cmd_init: 2411 hclgevf_cmd_uninit(hdev); 2412 err_cmd_queue_init: 2413 hclgevf_pci_uninit(hdev); 2414 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2415 return ret; 2416 } 2417 2418 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 2419 { 2420 hclgevf_state_uninit(hdev); 2421 2422 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2423 hclgevf_misc_irq_uninit(hdev); 2424 hclgevf_uninit_msi(hdev); 2425 } 2426 2427 hclgevf_pci_uninit(hdev); 2428 hclgevf_cmd_uninit(hdev); 2429 } 2430 2431 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 2432 { 2433 struct pci_dev *pdev = ae_dev->pdev; 2434 struct hclgevf_dev *hdev; 2435 int ret; 2436 2437 ret = hclgevf_alloc_hdev(ae_dev); 2438 if (ret) { 2439 dev_err(&pdev->dev, "hclge device allocation failed\n"); 2440 return ret; 2441 } 2442 2443 ret = hclgevf_init_hdev(ae_dev->priv); 2444 if (ret) { 2445 dev_err(&pdev->dev, "hclge device initialization failed\n"); 2446 return ret; 2447 } 2448 2449 hdev = ae_dev->priv; 2450 timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0); 2451 INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task); 2452 2453 return 0; 2454 } 2455 2456 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 2457 { 2458 struct hclgevf_dev *hdev = ae_dev->priv; 2459 2460 hclgevf_uninit_hdev(hdev); 2461 ae_dev->priv = NULL; 2462 } 2463 2464 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 2465 { 2466 struct hnae3_handle *nic = &hdev->nic; 2467 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 2468 2469 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 2470 } 2471 2472 /** 2473 * hclgevf_get_channels - Get the current channels enabled and max supported. 2474 * @handle: hardware information for network interface 2475 * @ch: ethtool channels structure 2476 * 2477 * We don't support separate tx and rx queues as channels. The other count 2478 * represents how many queues are being used for control. max_combined counts 2479 * how many queue pairs we can support. They may not be mapped 1 to 1 with 2480 * q_vectors since we support a lot more queue pairs than q_vectors. 2481 **/ 2482 static void hclgevf_get_channels(struct hnae3_handle *handle, 2483 struct ethtool_channels *ch) 2484 { 2485 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2486 2487 ch->max_combined = hclgevf_get_max_channels(hdev); 2488 ch->other_count = 0; 2489 ch->max_other = 0; 2490 ch->combined_count = hdev->num_tqps; 2491 } 2492 2493 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 2494 u16 *alloc_tqps, u16 *max_rss_size) 2495 { 2496 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2497 2498 *alloc_tqps = hdev->num_tqps; 2499 *max_rss_size = hdev->rss_size_max; 2500 } 2501 2502 static int hclgevf_get_status(struct hnae3_handle *handle) 2503 { 2504 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2505 2506 return hdev->hw.mac.link; 2507 } 2508 2509 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 2510 u8 *auto_neg, u32 *speed, 2511 u8 *duplex) 2512 { 2513 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2514 2515 if (speed) 2516 *speed = hdev->hw.mac.speed; 2517 if (duplex) 2518 *duplex = hdev->hw.mac.duplex; 2519 if (auto_neg) 2520 *auto_neg = AUTONEG_DISABLE; 2521 } 2522 2523 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 2524 u8 duplex) 2525 { 2526 hdev->hw.mac.speed = speed; 2527 hdev->hw.mac.duplex = duplex; 2528 } 2529 2530 static int hclgevf_gro_en(struct hnae3_handle *handle, int enable) 2531 { 2532 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2533 2534 return hclgevf_config_gro(hdev, enable); 2535 } 2536 2537 static void hclgevf_get_media_type(struct hnae3_handle *handle, 2538 u8 *media_type) 2539 { 2540 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2541 if (media_type) 2542 *media_type = hdev->hw.mac.media_type; 2543 } 2544 2545 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 2546 { 2547 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2548 2549 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2550 } 2551 2552 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 2553 { 2554 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2555 2556 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2557 } 2558 2559 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 2560 { 2561 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2562 2563 return hdev->reset_count; 2564 } 2565 2566 #define MAX_SEPARATE_NUM 4 2567 #define SEPARATOR_VALUE 0xFFFFFFFF 2568 #define REG_NUM_PER_LINE 4 2569 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 2570 2571 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 2572 { 2573 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 2574 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2575 2576 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 2577 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 2578 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 2579 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 2580 2581 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 2582 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 2583 } 2584 2585 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 2586 void *data) 2587 { 2588 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2589 int i, j, reg_um, separator_num; 2590 u32 *reg = data; 2591 2592 *version = hdev->fw_version; 2593 2594 /* fetching per-VF registers values from VF PCIe register space */ 2595 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 2596 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2597 for (i = 0; i < reg_um; i++) 2598 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 2599 for (i = 0; i < separator_num; i++) 2600 *reg++ = SEPARATOR_VALUE; 2601 2602 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 2603 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2604 for (i = 0; i < reg_um; i++) 2605 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 2606 for (i = 0; i < separator_num; i++) 2607 *reg++ = SEPARATOR_VALUE; 2608 2609 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 2610 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2611 for (j = 0; j < hdev->num_tqps; j++) { 2612 for (i = 0; i < reg_um; i++) 2613 *reg++ = hclgevf_read_dev(&hdev->hw, 2614 ring_reg_addr_list[i] + 2615 0x200 * j); 2616 for (i = 0; i < separator_num; i++) 2617 *reg++ = SEPARATOR_VALUE; 2618 } 2619 2620 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 2621 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2622 for (j = 0; j < hdev->num_msi_used - 1; j++) { 2623 for (i = 0; i < reg_um; i++) 2624 *reg++ = hclgevf_read_dev(&hdev->hw, 2625 tqp_intr_reg_addr_list[i] + 2626 4 * j); 2627 for (i = 0; i < separator_num; i++) 2628 *reg++ = SEPARATOR_VALUE; 2629 } 2630 } 2631 2632 static const struct hnae3_ae_ops hclgevf_ops = { 2633 .init_ae_dev = hclgevf_init_ae_dev, 2634 .uninit_ae_dev = hclgevf_uninit_ae_dev, 2635 .flr_prepare = hclgevf_flr_prepare, 2636 .flr_done = hclgevf_flr_done, 2637 .init_client_instance = hclgevf_init_client_instance, 2638 .uninit_client_instance = hclgevf_uninit_client_instance, 2639 .start = hclgevf_ae_start, 2640 .stop = hclgevf_ae_stop, 2641 .client_start = hclgevf_client_start, 2642 .client_stop = hclgevf_client_stop, 2643 .map_ring_to_vector = hclgevf_map_ring_to_vector, 2644 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 2645 .get_vector = hclgevf_get_vector, 2646 .put_vector = hclgevf_put_vector, 2647 .reset_queue = hclgevf_reset_tqp, 2648 .set_promisc_mode = hclgevf_set_promisc_mode, 2649 .get_mac_addr = hclgevf_get_mac_addr, 2650 .set_mac_addr = hclgevf_set_mac_addr, 2651 .add_uc_addr = hclgevf_add_uc_addr, 2652 .rm_uc_addr = hclgevf_rm_uc_addr, 2653 .add_mc_addr = hclgevf_add_mc_addr, 2654 .rm_mc_addr = hclgevf_rm_mc_addr, 2655 .get_stats = hclgevf_get_stats, 2656 .update_stats = hclgevf_update_stats, 2657 .get_strings = hclgevf_get_strings, 2658 .get_sset_count = hclgevf_get_sset_count, 2659 .get_rss_key_size = hclgevf_get_rss_key_size, 2660 .get_rss_indir_size = hclgevf_get_rss_indir_size, 2661 .get_rss = hclgevf_get_rss, 2662 .set_rss = hclgevf_set_rss, 2663 .get_rss_tuple = hclgevf_get_rss_tuple, 2664 .set_rss_tuple = hclgevf_set_rss_tuple, 2665 .get_tc_size = hclgevf_get_tc_size, 2666 .get_fw_version = hclgevf_get_fw_version, 2667 .set_vlan_filter = hclgevf_set_vlan_filter, 2668 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 2669 .reset_event = hclgevf_reset_event, 2670 .set_default_reset_request = hclgevf_set_def_reset_request, 2671 .get_channels = hclgevf_get_channels, 2672 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 2673 .get_regs_len = hclgevf_get_regs_len, 2674 .get_regs = hclgevf_get_regs, 2675 .get_status = hclgevf_get_status, 2676 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 2677 .get_media_type = hclgevf_get_media_type, 2678 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 2679 .ae_dev_resetting = hclgevf_ae_dev_resetting, 2680 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 2681 .set_gro_en = hclgevf_gro_en, 2682 .set_mtu = hclgevf_set_mtu, 2683 .get_global_queue_id = hclgevf_get_qid_global, 2684 .set_timer_task = hclgevf_set_timer_task, 2685 }; 2686 2687 static struct hnae3_ae_algo ae_algovf = { 2688 .ops = &hclgevf_ops, 2689 .pdev_id_table = ae_algovf_pci_tbl, 2690 }; 2691 2692 static int hclgevf_init(void) 2693 { 2694 pr_info("%s is initializing\n", HCLGEVF_NAME); 2695 2696 hnae3_register_ae_algo(&ae_algovf); 2697 2698 return 0; 2699 } 2700 2701 static void hclgevf_exit(void) 2702 { 2703 hnae3_unregister_ae_algo(&ae_algovf); 2704 } 2705 module_init(hclgevf_init); 2706 module_exit(hclgevf_exit); 2707 2708 MODULE_LICENSE("GPL"); 2709 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 2710 MODULE_DESCRIPTION("HCLGEVF Driver"); 2711 MODULE_VERSION(HCLGEVF_MOD_VERSION); 2712