1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 15 static struct hnae3_ae_algo ae_algovf; 16 17 static const struct pci_device_id ae_algovf_pci_tbl[] = { 18 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 19 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 20 /* required last entry */ 21 {0, } 22 }; 23 24 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 25 26 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 27 HCLGEVF_CMDQ_TX_ADDR_H_REG, 28 HCLGEVF_CMDQ_TX_DEPTH_REG, 29 HCLGEVF_CMDQ_TX_TAIL_REG, 30 HCLGEVF_CMDQ_TX_HEAD_REG, 31 HCLGEVF_CMDQ_RX_ADDR_L_REG, 32 HCLGEVF_CMDQ_RX_ADDR_H_REG, 33 HCLGEVF_CMDQ_RX_DEPTH_REG, 34 HCLGEVF_CMDQ_RX_TAIL_REG, 35 HCLGEVF_CMDQ_RX_HEAD_REG, 36 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 37 HCLGEVF_CMDQ_INTR_STS_REG, 38 HCLGEVF_CMDQ_INTR_EN_REG, 39 HCLGEVF_CMDQ_INTR_GEN_REG}; 40 41 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 42 HCLGEVF_RST_ING, 43 HCLGEVF_GRO_EN_REG}; 44 45 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 46 HCLGEVF_RING_RX_ADDR_H_REG, 47 HCLGEVF_RING_RX_BD_NUM_REG, 48 HCLGEVF_RING_RX_BD_LENGTH_REG, 49 HCLGEVF_RING_RX_MERGE_EN_REG, 50 HCLGEVF_RING_RX_TAIL_REG, 51 HCLGEVF_RING_RX_HEAD_REG, 52 HCLGEVF_RING_RX_FBD_NUM_REG, 53 HCLGEVF_RING_RX_OFFSET_REG, 54 HCLGEVF_RING_RX_FBD_OFFSET_REG, 55 HCLGEVF_RING_RX_STASH_REG, 56 HCLGEVF_RING_RX_BD_ERR_REG, 57 HCLGEVF_RING_TX_ADDR_L_REG, 58 HCLGEVF_RING_TX_ADDR_H_REG, 59 HCLGEVF_RING_TX_BD_NUM_REG, 60 HCLGEVF_RING_TX_PRIORITY_REG, 61 HCLGEVF_RING_TX_TC_REG, 62 HCLGEVF_RING_TX_MERGE_EN_REG, 63 HCLGEVF_RING_TX_TAIL_REG, 64 HCLGEVF_RING_TX_HEAD_REG, 65 HCLGEVF_RING_TX_FBD_NUM_REG, 66 HCLGEVF_RING_TX_OFFSET_REG, 67 HCLGEVF_RING_TX_EBD_NUM_REG, 68 HCLGEVF_RING_TX_EBD_OFFSET_REG, 69 HCLGEVF_RING_TX_BD_ERR_REG, 70 HCLGEVF_RING_EN_REG}; 71 72 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 73 HCLGEVF_TQP_INTR_GL0_REG, 74 HCLGEVF_TQP_INTR_GL1_REG, 75 HCLGEVF_TQP_INTR_GL2_REG, 76 HCLGEVF_TQP_INTR_RL_REG}; 77 78 static inline struct hclgevf_dev *hclgevf_ae_get_hdev( 79 struct hnae3_handle *handle) 80 { 81 return container_of(handle, struct hclgevf_dev, nic); 82 } 83 84 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 85 { 86 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 87 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 88 struct hclgevf_desc desc; 89 struct hclgevf_tqp *tqp; 90 int status; 91 int i; 92 93 for (i = 0; i < kinfo->num_tqps; i++) { 94 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 95 hclgevf_cmd_setup_basic_desc(&desc, 96 HCLGEVF_OPC_QUERY_RX_STATUS, 97 true); 98 99 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 100 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 101 if (status) { 102 dev_err(&hdev->pdev->dev, 103 "Query tqp stat fail, status = %d,queue = %d\n", 104 status, i); 105 return status; 106 } 107 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 108 le32_to_cpu(desc.data[1]); 109 110 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 111 true); 112 113 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 114 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 115 if (status) { 116 dev_err(&hdev->pdev->dev, 117 "Query tqp stat fail, status 
= %d,queue = %d\n", 118 status, i); 119 return status; 120 } 121 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 122 le32_to_cpu(desc.data[1]); 123 } 124 125 return 0; 126 } 127 128 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 129 { 130 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 131 struct hclgevf_tqp *tqp; 132 u64 *buff = data; 133 int i; 134 135 for (i = 0; i < kinfo->num_tqps; i++) { 136 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 137 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 138 } 139 for (i = 0; i < kinfo->num_tqps; i++) { 140 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 141 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 142 } 143 144 return buff; 145 } 146 147 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 148 { 149 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 150 151 return kinfo->num_tqps * 2; 152 } 153 154 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 155 { 156 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 157 u8 *buff = data; 158 int i = 0; 159 160 for (i = 0; i < kinfo->num_tqps; i++) { 161 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 162 struct hclgevf_tqp, q); 163 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 164 tqp->index); 165 buff += ETH_GSTRING_LEN; 166 } 167 168 for (i = 0; i < kinfo->num_tqps; i++) { 169 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 170 struct hclgevf_tqp, q); 171 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 172 tqp->index); 173 buff += ETH_GSTRING_LEN; 174 } 175 176 return buff; 177 } 178 179 static void hclgevf_update_stats(struct hnae3_handle *handle, 180 struct net_device_stats *net_stats) 181 { 182 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 183 int status; 184 185 status = hclgevf_tqps_update_stats(handle); 186 if (status) 187 dev_err(&hdev->pdev->dev, 188 "VF update of TQPS stats fail, status = %d.\n", 189 status); 190 } 191 192 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 193 { 194 if (strset == ETH_SS_TEST) 195 return -EOPNOTSUPP; 196 else if (strset == ETH_SS_STATS) 197 return hclgevf_tqps_get_sset_count(handle, strset); 198 199 return 0; 200 } 201 202 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 203 u8 *data) 204 { 205 u8 *p = (char *)data; 206 207 if (strset == ETH_SS_STATS) 208 p = hclgevf_tqps_get_strings(handle, p); 209 } 210 211 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 212 { 213 hclgevf_tqps_get_stats(handle, data); 214 } 215 216 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 217 { 218 u8 resp_msg; 219 int status; 220 221 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 222 true, &resp_msg, sizeof(u8)); 223 if (status) { 224 dev_err(&hdev->pdev->dev, 225 "VF request to get TC info from PF failed %d", 226 status); 227 return status; 228 } 229 230 hdev->hw_tc_map = resp_msg; 231 232 return 0; 233 } 234 235 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 236 { 237 #define HCLGEVF_TQPS_RSS_INFO_LEN 8 238 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 239 int status; 240 241 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 242 true, resp_msg, 243 HCLGEVF_TQPS_RSS_INFO_LEN); 244 if (status) { 245 dev_err(&hdev->pdev->dev, 246 "VF request to get tqp info from PF failed %d", 247 status); 248 return status; 249 } 250 251 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 252 memcpy(&hdev->rss_size_max, 
&resp_msg[2], sizeof(u16)); 253 memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); 254 memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); 255 256 return 0; 257 } 258 259 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 260 { 261 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 262 u8 msg_data[2], resp_data[2]; 263 u16 qid_in_pf = 0; 264 int ret; 265 266 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 267 268 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, 269 2, true, resp_data, 2); 270 if (!ret) 271 qid_in_pf = *(u16 *)resp_data; 272 273 return qid_in_pf; 274 } 275 276 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 277 { 278 struct hclgevf_tqp *tqp; 279 int i; 280 281 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 282 sizeof(struct hclgevf_tqp), GFP_KERNEL); 283 if (!hdev->htqp) 284 return -ENOMEM; 285 286 tqp = hdev->htqp; 287 288 for (i = 0; i < hdev->num_tqps; i++) { 289 tqp->dev = &hdev->pdev->dev; 290 tqp->index = i; 291 292 tqp->q.ae_algo = &ae_algovf; 293 tqp->q.buf_size = hdev->rx_buf_len; 294 tqp->q.desc_num = hdev->num_desc; 295 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 296 i * HCLGEVF_TQP_REG_SIZE; 297 298 tqp++; 299 } 300 301 return 0; 302 } 303 304 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 305 { 306 struct hnae3_handle *nic = &hdev->nic; 307 struct hnae3_knic_private_info *kinfo; 308 u16 new_tqps = hdev->num_tqps; 309 int i; 310 311 kinfo = &nic->kinfo; 312 kinfo->num_tc = 0; 313 kinfo->num_desc = hdev->num_desc; 314 kinfo->rx_buf_len = hdev->rx_buf_len; 315 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 316 if (hdev->hw_tc_map & BIT(i)) 317 kinfo->num_tc++; 318 319 kinfo->rss_size 320 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 321 new_tqps = kinfo->rss_size * kinfo->num_tc; 322 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 323 324 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 325 sizeof(struct hnae3_queue *), GFP_KERNEL); 326 if (!kinfo->tqp) 327 return -ENOMEM; 328 329 for (i = 0; i < kinfo->num_tqps; i++) { 330 hdev->htqp[i].q.handle = &hdev->nic; 331 hdev->htqp[i].q.tqp_index = i; 332 kinfo->tqp[i] = &hdev->htqp[i].q; 333 } 334 335 return 0; 336 } 337 338 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 339 { 340 int status; 341 u8 resp_msg; 342 343 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 344 0, false, &resp_msg, sizeof(u8)); 345 if (status) 346 dev_err(&hdev->pdev->dev, 347 "VF failed to fetch link status(%d) from PF", status); 348 } 349 350 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 351 { 352 struct hnae3_handle *rhandle = &hdev->roce; 353 struct hnae3_handle *handle = &hdev->nic; 354 struct hnae3_client *rclient; 355 struct hnae3_client *client; 356 357 client = handle->client; 358 rclient = hdev->roce_client; 359 360 link_state = 361 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; 362 363 if (link_state != hdev->hw.mac.link) { 364 client->ops->link_status_change(handle, !!link_state); 365 if (rclient && rclient->ops->link_status_change) 366 rclient->ops->link_status_change(rhandle, !!link_state); 367 hdev->hw.mac.link = link_state; 368 } 369 } 370 371 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 372 { 373 struct hnae3_handle *nic = &hdev->nic; 374 int ret; 375 376 nic->ae_algo = &ae_algovf; 377 nic->pdev = hdev->pdev; 378 nic->numa_node_mask = hdev->numa_node_mask; 379 nic->flags |= HNAE3_SUPPORT_VF; 380 381 if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { 382 dev_err(&hdev->pdev->dev, "unsupported device type %d\n", 383 hdev->ae_dev->dev_type); 384 return -EINVAL; 385 } 386 387 ret = hclgevf_knic_setup(hdev); 388 if (ret) 389 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 390 ret); 391 return ret; 392 } 393 394 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 395 { 396 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 397 dev_warn(&hdev->pdev->dev, 398 "vector(vector_id %d) has been freed.\n", vector_id); 399 return; 400 } 401 402 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 403 hdev->num_msi_left += 1; 404 hdev->num_msi_used -= 1; 405 } 406 407 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 408 struct hnae3_vector_info *vector_info) 409 { 410 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 411 struct hnae3_vector_info *vector = vector_info; 412 int alloc = 0; 413 int i, j; 414 415 vector_num = min(hdev->num_msi_left, vector_num); 416 417 for (j = 0; j < vector_num; j++) { 418 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 419 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 420 vector->vector = pci_irq_vector(hdev->pdev, i); 421 vector->io_addr = hdev->hw.io_base + 422 HCLGEVF_VECTOR_REG_BASE + 423 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 424 hdev->vector_status[i] = 0; 425 hdev->vector_irq[i] = vector->vector; 426 427 vector++; 428 alloc++; 429 430 break; 431 } 432 } 433 } 434 hdev->num_msi_left -= alloc; 435 hdev->num_msi_used += alloc; 436 437 return alloc; 438 } 439 440 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 441 { 442 int i; 443 444 for (i = 0; i < hdev->num_msi; i++) 445 if (vector == hdev->vector_irq[i]) 446 return i; 447 448 return -EINVAL; 449 } 450 451 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 452 const u8 hfunc, const u8 *key) 453 { 454 struct hclgevf_rss_config_cmd *req; 455 struct hclgevf_desc desc; 456 int key_offset; 457 int key_size; 458 int ret; 459 460 req = (struct hclgevf_rss_config_cmd *)desc.data; 461 462 for (key_offset = 0; key_offset < 3; key_offset++) { 463 hclgevf_cmd_setup_basic_desc(&desc, 464 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 465 false); 466 467 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 468 req->hash_config |= 469 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 470 471 if (key_offset == 2) 472 key_size = 473 HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; 474 else 475 key_size = HCLGEVF_RSS_HASH_KEY_NUM; 476 477 memcpy(req->hash_key, 478 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 479 480 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 481 if (ret) { 482 dev_err(&hdev->pdev->dev, 483 "Configure RSS config fail, status = %d\n", 484 ret); 485 return ret; 486 } 487 } 488 489 return 0; 490 } 491 492 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 493 { 494 return HCLGEVF_RSS_KEY_SIZE; 495 } 496 497 
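/* return the number of entries in this VF's RSS indirection table */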
static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case
ETH_RSS_HASH_TOP: 620 rss_cfg->hash_algo = 621 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 622 break; 623 case ETH_RSS_HASH_XOR: 624 rss_cfg->hash_algo = 625 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 626 break; 627 case ETH_RSS_HASH_NO_CHANGE: 628 break; 629 default: 630 return -EINVAL; 631 } 632 633 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 634 key); 635 if (ret) 636 return ret; 637 638 /* Update the shadow RSS key with user specified qids */ 639 memcpy(rss_cfg->rss_hash_key, key, 640 HCLGEVF_RSS_KEY_SIZE); 641 } 642 } 643 644 /* update the shadow RSS table with user specified qids */ 645 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 646 rss_cfg->rss_indirection_tbl[i] = indir[i]; 647 648 /* update the hardware */ 649 return hclgevf_set_rss_indir_table(hdev); 650 } 651 652 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 653 { 654 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 655 656 if (nfc->data & RXH_L4_B_2_3) 657 hash_sets |= HCLGEVF_D_PORT_BIT; 658 else 659 hash_sets &= ~HCLGEVF_D_PORT_BIT; 660 661 if (nfc->data & RXH_IP_SRC) 662 hash_sets |= HCLGEVF_S_IP_BIT; 663 else 664 hash_sets &= ~HCLGEVF_S_IP_BIT; 665 666 if (nfc->data & RXH_IP_DST) 667 hash_sets |= HCLGEVF_D_IP_BIT; 668 else 669 hash_sets &= ~HCLGEVF_D_IP_BIT; 670 671 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 672 hash_sets |= HCLGEVF_V_TAG_BIT; 673 674 return hash_sets; 675 } 676 677 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 678 struct ethtool_rxnfc *nfc) 679 { 680 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 681 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 682 struct hclgevf_rss_input_tuple_cmd *req; 683 struct hclgevf_desc desc; 684 u8 tuple_sets; 685 int ret; 686 687 if (handle->pdev->revision == 0x20) 688 return -EOPNOTSUPP; 689 690 if (nfc->data & 691 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 692 return -EINVAL; 693 694 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 695 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 696 697 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 698 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 699 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 700 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 701 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 702 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 703 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 704 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 705 706 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 707 switch (nfc->flow_type) { 708 case TCP_V4_FLOW: 709 req->ipv4_tcp_en = tuple_sets; 710 break; 711 case TCP_V6_FLOW: 712 req->ipv6_tcp_en = tuple_sets; 713 break; 714 case UDP_V4_FLOW: 715 req->ipv4_udp_en = tuple_sets; 716 break; 717 case UDP_V6_FLOW: 718 req->ipv6_udp_en = tuple_sets; 719 break; 720 case SCTP_V4_FLOW: 721 req->ipv4_sctp_en = tuple_sets; 722 break; 723 case SCTP_V6_FLOW: 724 if ((nfc->data & RXH_L4_B_0_1) || 725 (nfc->data & RXH_L4_B_2_3)) 726 return -EINVAL; 727 728 req->ipv6_sctp_en = tuple_sets; 729 break; 730 case IPV4_FLOW: 731 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 732 break; 733 case IPV6_FLOW: 734 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 735 break; 736 default: 737 return -EINVAL; 738 } 739 740 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 741 if (ret) { 742 dev_err(&hdev->pdev->dev, 743 "Set rss tuple fail, status = %d\n", ret); 744 return ret; 745 } 746 747 
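	/* hardware accepted the new tuple config, so update the shadow copy */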
rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 748 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 749 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 750 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 751 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 752 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 753 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 754 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 755 return 0; 756 } 757 758 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 759 struct ethtool_rxnfc *nfc) 760 { 761 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 762 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 763 u8 tuple_sets; 764 765 if (handle->pdev->revision == 0x20) 766 return -EOPNOTSUPP; 767 768 nfc->data = 0; 769 770 switch (nfc->flow_type) { 771 case TCP_V4_FLOW: 772 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 773 break; 774 case UDP_V4_FLOW: 775 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 776 break; 777 case TCP_V6_FLOW: 778 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 779 break; 780 case UDP_V6_FLOW: 781 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 782 break; 783 case SCTP_V4_FLOW: 784 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 785 break; 786 case SCTP_V6_FLOW: 787 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 788 break; 789 case IPV4_FLOW: 790 case IPV6_FLOW: 791 tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 792 break; 793 default: 794 return -EINVAL; 795 } 796 797 if (!tuple_sets) 798 return 0; 799 800 if (tuple_sets & HCLGEVF_D_PORT_BIT) 801 nfc->data |= RXH_L4_B_2_3; 802 if (tuple_sets & HCLGEVF_S_PORT_BIT) 803 nfc->data |= RXH_L4_B_0_1; 804 if (tuple_sets & HCLGEVF_D_IP_BIT) 805 nfc->data |= RXH_IP_DST; 806 if (tuple_sets & HCLGEVF_S_IP_BIT) 807 nfc->data |= RXH_IP_SRC; 808 809 return 0; 810 } 811 812 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 813 struct hclgevf_rss_cfg *rss_cfg) 814 { 815 struct hclgevf_rss_input_tuple_cmd *req; 816 struct hclgevf_desc desc; 817 int ret; 818 819 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 820 821 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 822 823 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 824 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 825 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 826 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 827 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 828 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 829 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 830 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 831 832 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 833 if (ret) 834 dev_err(&hdev->pdev->dev, 835 "Configure rss input fail, status = %d\n", ret); 836 return ret; 837 } 838 839 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 840 { 841 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 842 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 843 844 return rss_cfg->rss_size; 845 } 846 847 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 848 int vector_id, 849 struct hnae3_ring_chain_node *ring_chain) 850 { 851 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 852 struct hnae3_ring_chain_node *node; 853 struct hclge_mbx_vf_to_pf_cmd *req; 854 struct hclgevf_desc desc; 855 int i = 0; 856 int status; 857 u8 type; 858 859 req = (struct 
hclge_mbx_vf_to_pf_cmd *)desc.data; 860 861 for (node = ring_chain; node; node = node->next) { 862 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 863 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 864 865 if (i == 0) { 866 hclgevf_cmd_setup_basic_desc(&desc, 867 HCLGEVF_OPC_MBX_VF_TO_PF, 868 false); 869 type = en ? 870 HCLGE_MBX_MAP_RING_TO_VECTOR : 871 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 872 req->msg[0] = type; 873 req->msg[1] = vector_id; 874 } 875 876 req->msg[idx_offset] = 877 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 878 req->msg[idx_offset + 1] = node->tqp_index; 879 req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, 880 HNAE3_RING_GL_IDX_M, 881 HNAE3_RING_GL_IDX_S); 882 883 i++; 884 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 885 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 886 HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 887 !node->next) { 888 req->msg[2] = i; 889 890 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 891 if (status) { 892 dev_err(&hdev->pdev->dev, 893 "Map TQP fail, status is %d.\n", 894 status); 895 return status; 896 } 897 i = 0; 898 hclgevf_cmd_setup_basic_desc(&desc, 899 HCLGEVF_OPC_MBX_VF_TO_PF, 900 false); 901 req->msg[0] = type; 902 req->msg[1] = vector_id; 903 } 904 } 905 906 return 0; 907 } 908 909 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 910 struct hnae3_ring_chain_node *ring_chain) 911 { 912 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 913 int vector_id; 914 915 vector_id = hclgevf_get_vector_index(hdev, vector); 916 if (vector_id < 0) { 917 dev_err(&handle->pdev->dev, 918 "Get vector index fail. ret =%d\n", vector_id); 919 return vector_id; 920 } 921 922 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 923 } 924 925 static int hclgevf_unmap_ring_from_vector( 926 struct hnae3_handle *handle, 927 int vector, 928 struct hnae3_ring_chain_node *ring_chain) 929 { 930 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 931 int ret, vector_id; 932 933 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 934 return 0; 935 936 vector_id = hclgevf_get_vector_index(hdev, vector); 937 if (vector_id < 0) { 938 dev_err(&handle->pdev->dev, 939 "Get vector index fail. ret =%d\n", vector_id); 940 return vector_id; 941 } 942 943 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 944 if (ret) 945 dev_err(&handle->pdev->dev, 946 "Unmap ring from vector fail. vector=%d, ret =%d\n", 947 vector_id, 948 ret); 949 950 return ret; 951 } 952 953 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 954 { 955 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 956 int vector_id; 957 958 vector_id = hclgevf_get_vector_index(hdev, vector); 959 if (vector_id < 0) { 960 dev_err(&handle->pdev->dev, 961 "hclgevf_put_vector get vector index fail. ret =%d\n", 962 vector_id); 963 return vector_id; 964 } 965 966 hclgevf_free_vector(hdev, vector_id); 967 968 return 0; 969 } 970 971 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 972 bool en_bc_pmc) 973 { 974 struct hclge_mbx_vf_to_pf_cmd *req; 975 struct hclgevf_desc desc; 976 int ret; 977 978 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 979 980 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); 981 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; 982 req->msg[1] = en_bc_pmc ? 
1 : 0; 983 984 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 985 if (ret) 986 dev_err(&hdev->pdev->dev, 987 "Set promisc mode fail, status is %d.\n", ret); 988 989 return ret; 990 } 991 992 static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc) 993 { 994 return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc); 995 } 996 997 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, 998 int stream_id, bool enable) 999 { 1000 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1001 struct hclgevf_desc desc; 1002 int status; 1003 1004 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1005 1006 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1007 false); 1008 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1009 req->stream_id = cpu_to_le16(stream_id); 1010 req->enable |= enable << HCLGEVF_TQP_ENABLE_B; 1011 1012 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1013 if (status) 1014 dev_err(&hdev->pdev->dev, 1015 "TQP enable fail, status =%d.\n", status); 1016 1017 return status; 1018 } 1019 1020 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1021 { 1022 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1023 struct hclgevf_tqp *tqp; 1024 int i; 1025 1026 for (i = 0; i < kinfo->num_tqps; i++) { 1027 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1028 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1029 } 1030 } 1031 1032 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1033 { 1034 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1035 1036 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1037 } 1038 1039 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1040 bool is_first) 1041 { 1042 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1043 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1044 u8 *new_mac_addr = (u8 *)p; 1045 u8 msg_data[ETH_ALEN * 2]; 1046 u16 subcode; 1047 int status; 1048 1049 ether_addr_copy(msg_data, new_mac_addr); 1050 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); 1051 1052 subcode = is_first ? 
HCLGE_MBX_MAC_VLAN_UC_ADD : 1053 HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1054 1055 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1056 subcode, msg_data, ETH_ALEN * 2, 1057 true, NULL, 0); 1058 if (!status) 1059 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1060 1061 return status; 1062 } 1063 1064 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1065 const unsigned char *addr) 1066 { 1067 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1068 1069 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1070 HCLGE_MBX_MAC_VLAN_UC_ADD, 1071 addr, ETH_ALEN, false, NULL, 0); 1072 } 1073 1074 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1075 const unsigned char *addr) 1076 { 1077 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1078 1079 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1080 HCLGE_MBX_MAC_VLAN_UC_REMOVE, 1081 addr, ETH_ALEN, false, NULL, 0); 1082 } 1083 1084 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1085 const unsigned char *addr) 1086 { 1087 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1088 1089 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1090 HCLGE_MBX_MAC_VLAN_MC_ADD, 1091 addr, ETH_ALEN, false, NULL, 0); 1092 } 1093 1094 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1095 const unsigned char *addr) 1096 { 1097 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1098 1099 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1100 HCLGE_MBX_MAC_VLAN_MC_REMOVE, 1101 addr, ETH_ALEN, false, NULL, 0); 1102 } 1103 1104 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1105 __be16 proto, u16 vlan_id, 1106 bool is_kill) 1107 { 1108 #define HCLGEVF_VLAN_MBX_MSG_LEN 5 1109 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1110 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; 1111 1112 if (vlan_id > 4095) 1113 return -EINVAL; 1114 1115 if (proto != htons(ETH_P_8021Q)) 1116 return -EPROTONOSUPPORT; 1117 1118 msg_data[0] = is_kill; 1119 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 1120 memcpy(&msg_data[3], &proto, sizeof(proto)); 1121 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1122 HCLGE_MBX_VLAN_FILTER, msg_data, 1123 HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); 1124 } 1125 1126 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1127 { 1128 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1129 u8 msg_data; 1130 1131 msg_data = enable ? 
1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev,
			"flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete. This
	 * might happen in case reset assertion was made by PF. Yes, this also
	 * means we might end up waiting a bit more even for VF reset.
1240 */ 1241 msleep(5000); 1242 1243 return 0; 1244 } 1245 1246 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1247 { 1248 int ret; 1249 1250 /* uninitialize the nic client */ 1251 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1252 if (ret) 1253 return ret; 1254 1255 /* re-initialize the hclge device */ 1256 ret = hclgevf_reset_hdev(hdev); 1257 if (ret) { 1258 dev_err(&hdev->pdev->dev, 1259 "hclge device re-init failed, VF is disabled!\n"); 1260 return ret; 1261 } 1262 1263 /* bring up the nic client again */ 1264 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1265 if (ret) 1266 return ret; 1267 1268 return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); 1269 } 1270 1271 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1272 { 1273 int ret = 0; 1274 1275 switch (hdev->reset_type) { 1276 case HNAE3_VF_FUNC_RESET: 1277 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 1278 0, true, NULL, sizeof(u8)); 1279 break; 1280 case HNAE3_FLR_RESET: 1281 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1282 break; 1283 default: 1284 break; 1285 } 1286 1287 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1288 1289 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", 1290 hdev->reset_type, ret); 1291 1292 return ret; 1293 } 1294 1295 static int hclgevf_reset(struct hclgevf_dev *hdev) 1296 { 1297 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1298 int ret; 1299 1300 /* Initialize ae_dev reset status as well, in case enet layer wants to 1301 * know if device is undergoing reset 1302 */ 1303 ae_dev->reset_type = hdev->reset_type; 1304 hdev->reset_count++; 1305 rtnl_lock(); 1306 1307 /* bring down the nic to stop any ongoing TX/RX */ 1308 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1309 if (ret) 1310 goto err_reset_lock; 1311 1312 rtnl_unlock(); 1313 1314 ret = hclgevf_reset_prepare_wait(hdev); 1315 if (ret) 1316 goto err_reset; 1317 1318 /* check if VF could successfully fetch the hardware reset completion 1319 * status from the hardware 1320 */ 1321 ret = hclgevf_reset_wait(hdev); 1322 if (ret) { 1323 /* can't do much in this situation, will disable VF */ 1324 dev_err(&hdev->pdev->dev, 1325 "VF failed(=%d) to fetch H/W reset completion status\n", 1326 ret); 1327 goto err_reset; 1328 } 1329 1330 rtnl_lock(); 1331 1332 /* now, re-initialize the nic client and ae device*/ 1333 ret = hclgevf_reset_stack(hdev); 1334 if (ret) { 1335 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1336 goto err_reset_lock; 1337 } 1338 1339 /* bring up the nic to enable TX/RX again */ 1340 ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1341 if (ret) 1342 goto err_reset_lock; 1343 1344 rtnl_unlock(); 1345 1346 hdev->last_reset_time = jiffies; 1347 ae_dev->reset_type = HNAE3_NONE_RESET; 1348 1349 return ret; 1350 err_reset_lock: 1351 rtnl_unlock(); 1352 err_reset: 1353 /* When VF reset failed, only the higher level reset asserted by PF 1354 * can restore it, so re-initialize the command queue to receive 1355 * this higher reset event. 
1356 */ 1357 hclgevf_cmd_init(hdev); 1358 dev_err(&hdev->pdev->dev, "failed to reset VF\n"); 1359 1360 return ret; 1361 } 1362 1363 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1364 unsigned long *addr) 1365 { 1366 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1367 1368 /* return the highest priority reset level amongst all */ 1369 if (test_bit(HNAE3_VF_RESET, addr)) { 1370 rst_level = HNAE3_VF_RESET; 1371 clear_bit(HNAE3_VF_RESET, addr); 1372 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1373 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1374 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1375 rst_level = HNAE3_VF_FULL_RESET; 1376 clear_bit(HNAE3_VF_FULL_RESET, addr); 1377 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1378 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1379 rst_level = HNAE3_VF_PF_FUNC_RESET; 1380 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1381 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1382 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1383 rst_level = HNAE3_VF_FUNC_RESET; 1384 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1385 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 1386 rst_level = HNAE3_FLR_RESET; 1387 clear_bit(HNAE3_FLR_RESET, addr); 1388 } 1389 1390 return rst_level; 1391 } 1392 1393 static void hclgevf_reset_event(struct pci_dev *pdev, 1394 struct hnae3_handle *handle) 1395 { 1396 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1397 struct hclgevf_dev *hdev = ae_dev->priv; 1398 1399 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 1400 1401 if (hdev->default_reset_request) 1402 hdev->reset_level = 1403 hclgevf_get_reset_level(hdev, 1404 &hdev->default_reset_request); 1405 else 1406 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1407 1408 /* reset of this VF requested */ 1409 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1410 hclgevf_reset_task_schedule(hdev); 1411 1412 hdev->last_reset_time = jiffies; 1413 } 1414 1415 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1416 enum hnae3_reset_type rst_type) 1417 { 1418 struct hclgevf_dev *hdev = ae_dev->priv; 1419 1420 set_bit(rst_type, &hdev->default_reset_request); 1421 } 1422 1423 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 1424 { 1425 #define HCLGEVF_FLR_WAIT_MS 100 1426 #define HCLGEVF_FLR_WAIT_CNT 50 1427 struct hclgevf_dev *hdev = ae_dev->priv; 1428 int cnt = 0; 1429 1430 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1431 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1432 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); 1433 hclgevf_reset_event(hdev->pdev, NULL); 1434 1435 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && 1436 cnt++ < HCLGEVF_FLR_WAIT_CNT) 1437 msleep(HCLGEVF_FLR_WAIT_MS); 1438 1439 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) 1440 dev_err(&hdev->pdev->dev, 1441 "flr wait down timeout: %d\n", cnt); 1442 } 1443 1444 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1445 { 1446 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1447 1448 return hdev->fw_version; 1449 } 1450 1451 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1452 { 1453 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1454 1455 vector->vector_irq = pci_irq_vector(hdev->pdev, 1456 HCLGEVF_MISC_VECTOR_NUM); 1457 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1458 /* vector status always valid for Vector 0 */ 1459 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1460 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1461 1462 hdev->num_msi_left -= 1; 1463 
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of PF for this reset request.
		 * PF will ack the 1b and 2 cases but we will not get any
		 * intimation about 1a from PF as cmdq would be in unreliable
		 * state i.e. mailbox communication between PF and VF would be
		 * broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try a
		 * reset of our PCIe + stack and see if it alleviates the
		 * problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(u8));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF.
PF would be able to tell VF 1628 * about such updates in future so we might remove this later 1629 */ 1630 hclgevf_request_link_info(hdev); 1631 1632 hclgevf_deferred_task_schedule(hdev); 1633 1634 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1635 } 1636 1637 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 1638 { 1639 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 1640 } 1641 1642 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 1643 u32 *clearval) 1644 { 1645 u32 cmdq_src_reg, rst_ing_reg; 1646 1647 /* fetch the events from their corresponding regs */ 1648 cmdq_src_reg = hclgevf_read_dev(&hdev->hw, 1649 HCLGEVF_VECTOR0_CMDQ_SRC_REG); 1650 1651 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) { 1652 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1653 dev_info(&hdev->pdev->dev, 1654 "receive reset interrupt 0x%x!\n", rst_ing_reg); 1655 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 1656 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1657 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1658 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); 1659 *clearval = cmdq_src_reg; 1660 return HCLGEVF_VECTOR0_EVENT_RST; 1661 } 1662 1663 /* check for vector0 mailbox(=CMDQ RX) event source */ 1664 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 1665 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 1666 *clearval = cmdq_src_reg; 1667 return HCLGEVF_VECTOR0_EVENT_MBX; 1668 } 1669 1670 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); 1671 1672 return HCLGEVF_VECTOR0_EVENT_OTHER; 1673 } 1674 1675 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1676 { 1677 writel(en ? 1 : 0, vector->addr); 1678 } 1679 1680 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 1681 { 1682 enum hclgevf_evt_cause event_cause; 1683 struct hclgevf_dev *hdev = data; 1684 u32 clearval; 1685 1686 hclgevf_enable_vector(&hdev->misc_vector, false); 1687 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 1688 1689 switch (event_cause) { 1690 case HCLGEVF_VECTOR0_EVENT_RST: 1691 hclgevf_reset_task_schedule(hdev); 1692 break; 1693 case HCLGEVF_VECTOR0_EVENT_MBX: 1694 hclgevf_mbx_handler(hdev); 1695 break; 1696 default: 1697 break; 1698 } 1699 1700 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 1701 hclgevf_clear_event_cause(hdev, clearval); 1702 hclgevf_enable_vector(&hdev->misc_vector, true); 1703 } 1704 1705 return IRQ_HANDLED; 1706 } 1707 1708 static int hclgevf_configure(struct hclgevf_dev *hdev) 1709 { 1710 int ret; 1711 1712 hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE; 1713 1714 /* get queue configuration from PF */ 1715 ret = hclgevf_get_queue_info(hdev); 1716 if (ret) 1717 return ret; 1718 /* get tc configuration from PF */ 1719 return hclgevf_get_tc_info(hdev); 1720 } 1721 1722 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 1723 { 1724 struct pci_dev *pdev = ae_dev->pdev; 1725 struct hclgevf_dev *hdev; 1726 1727 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 1728 if (!hdev) 1729 return -ENOMEM; 1730 1731 hdev->pdev = pdev; 1732 hdev->ae_dev = ae_dev; 1733 ae_dev->priv = hdev; 1734 1735 return 0; 1736 } 1737 1738 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 1739 { 1740 struct hnae3_handle *roce = &hdev->roce; 1741 struct hnae3_handle *nic = &hdev->nic; 1742 1743 roce->rinfo.num_vectors = hdev->num_roce_msix; 1744 1745 if (hdev->num_msi_left < roce->rinfo.num_vectors || 1746 hdev->num_msi_left 
== 0) 1747 return -EINVAL; 1748 1749 roce->rinfo.base_vector = hdev->roce_base_vector; 1750 1751 roce->rinfo.netdev = nic->kinfo.netdev; 1752 roce->rinfo.roce_io_base = hdev->hw.io_base; 1753 1754 roce->pdev = nic->pdev; 1755 roce->ae_algo = nic->ae_algo; 1756 roce->numa_node_mask = nic->numa_node_mask; 1757 1758 return 0; 1759 } 1760 1761 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 1762 { 1763 struct hclgevf_cfg_gro_status_cmd *req; 1764 struct hclgevf_desc desc; 1765 int ret; 1766 1767 if (!hnae3_dev_gro_supported(hdev)) 1768 return 0; 1769 1770 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 1771 false); 1772 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 1773 1774 req->gro_en = cpu_to_le16(en ? 1 : 0); 1775 1776 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1777 if (ret) 1778 dev_err(&hdev->pdev->dev, 1779 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 1780 1781 return ret; 1782 } 1783 1784 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 1785 { 1786 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1787 int i, ret; 1788 1789 rss_cfg->rss_size = hdev->rss_size_max; 1790 1791 if (hdev->pdev->revision >= 0x21) { 1792 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 1793 netdev_rss_key_fill(rss_cfg->rss_hash_key, 1794 HCLGEVF_RSS_KEY_SIZE); 1795 1796 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 1797 rss_cfg->rss_hash_key); 1798 if (ret) 1799 return ret; 1800 1801 rss_cfg->rss_tuple_sets.ipv4_tcp_en = 1802 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1803 rss_cfg->rss_tuple_sets.ipv4_udp_en = 1804 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1805 rss_cfg->rss_tuple_sets.ipv4_sctp_en = 1806 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1807 rss_cfg->rss_tuple_sets.ipv4_fragment_en = 1808 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1809 rss_cfg->rss_tuple_sets.ipv6_tcp_en = 1810 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1811 rss_cfg->rss_tuple_sets.ipv6_udp_en = 1812 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1813 rss_cfg->rss_tuple_sets.ipv6_sctp_en = 1814 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1815 rss_cfg->rss_tuple_sets.ipv6_fragment_en = 1816 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1817 1818 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 1819 if (ret) 1820 return ret; 1821 1822 } 1823 1824 /* Initialize RSS indirect table for each vport */ 1825 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 1826 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; 1827 1828 ret = hclgevf_set_rss_indir_table(hdev); 1829 if (ret) 1830 return ret; 1831 1832 return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); 1833 } 1834 1835 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 1836 { 1837 /* other vlan config(like, VLAN TX/RX offload) would also be added 1838 * here later 1839 */ 1840 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 1841 false); 1842 } 1843 1844 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 1845 { 1846 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1847 1848 if (enable) { 1849 mod_timer(&hdev->service_timer, jiffies + HZ); 1850 } else { 1851 del_timer_sync(&hdev->service_timer); 1852 cancel_work_sync(&hdev->service_task); 1853 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1854 } 1855 } 1856 1857 static int hclgevf_ae_start(struct hnae3_handle *handle) 1858 { 1859 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1860 1861 /* reset tqp stats */ 1862 hclgevf_reset_tqp_stats(handle); 1863 1864 hclgevf_request_link_info(hdev); 1865 1866 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1867 1868 return 0; 1869 } 1870 1871 
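/* bring the device down: reset every TQP, clear TQP stats and report link down */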
static void hclgevf_ae_stop(struct hnae3_handle *handle) 1872 { 1873 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1874 int i; 1875 1876 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1877 1878 for (i = 0; i < handle->kinfo.num_tqps; i++) 1879 hclgevf_reset_tqp(handle, i); 1880 1881 /* reset tqp stats */ 1882 hclgevf_reset_tqp_stats(handle); 1883 hclgevf_update_link_status(hdev, 0); 1884 } 1885 1886 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 1887 { 1888 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1889 u8 msg_data; 1890 1891 msg_data = alive ? 1 : 0; 1892 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE, 1893 0, &msg_data, 1, false, NULL, 0); 1894 } 1895 1896 static int hclgevf_client_start(struct hnae3_handle *handle) 1897 { 1898 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1899 1900 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 1901 return hclgevf_set_alive(handle, true); 1902 } 1903 1904 static void hclgevf_client_stop(struct hnae3_handle *handle) 1905 { 1906 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1907 int ret; 1908 1909 ret = hclgevf_set_alive(handle, false); 1910 if (ret) 1911 dev_warn(&hdev->pdev->dev, 1912 "%s failed %d\n", __func__, ret); 1913 1914 del_timer_sync(&hdev->keep_alive_timer); 1915 cancel_work_sync(&hdev->keep_alive_task); 1916 } 1917 1918 static void hclgevf_state_init(struct hclgevf_dev *hdev) 1919 { 1920 /* setup tasks for the MBX */ 1921 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); 1922 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1923 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1924 1925 /* setup tasks for service timer */ 1926 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); 1927 1928 INIT_WORK(&hdev->service_task, hclgevf_service_task); 1929 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1930 1931 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); 1932 1933 mutex_init(&hdev->mbx_resp.mbx_mutex); 1934 1935 /* bring the device down */ 1936 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1937 } 1938 1939 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 1940 { 1941 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1942 1943 if (hdev->service_timer.function) 1944 del_timer_sync(&hdev->service_timer); 1945 if (hdev->service_task.func) 1946 cancel_work_sync(&hdev->service_task); 1947 if (hdev->mbx_service_task.func) 1948 cancel_work_sync(&hdev->mbx_service_task); 1949 if (hdev->rst_service_task.func) 1950 cancel_work_sync(&hdev->rst_service_task); 1951 1952 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 1953 } 1954 1955 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 1956 { 1957 struct pci_dev *pdev = hdev->pdev; 1958 int vectors; 1959 int i; 1960 1961 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) 1962 vectors = pci_alloc_irq_vectors(pdev, 1963 hdev->roce_base_msix_offset + 1, 1964 hdev->num_msi, 1965 PCI_IRQ_MSIX); 1966 else 1967 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 1968 PCI_IRQ_MSI | PCI_IRQ_MSIX); 1969 1970 if (vectors < 0) { 1971 dev_err(&pdev->dev, 1972 "failed(%d) to allocate MSI/MSI-X vectors\n", 1973 vectors); 1974 return vectors; 1975 } 1976 if (vectors < hdev->num_msi) 1977 dev_warn(&hdev->pdev->dev, 1978 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 1979 hdev->num_msi, vectors); 1980 1981 hdev->num_msi = vectors; 1982 hdev->num_msi_left = vectors; 1983 hdev->base_msi_vector = pdev->irq; 1984 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 
	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}
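
/* The RoCE instance above is only brought up once both the NIC and RoCE
 * clients have registered (whichever registers second triggers the RoCE
 * init_instance call). Teardown below runs in the reverse order: RoCE
 * first, then NIC/UNIC, unless the request came from the RoCE client
 * itself.
 */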
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and Roce vectors, NIC vectors
		 * are queued before Roce vectors. The offset is fixed to 64.
2225 */ 2226 hdev->num_msi = hdev->num_roce_msix + 2227 hdev->roce_base_msix_offset; 2228 } else { 2229 hdev->num_msi = 2230 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2231 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2232 } 2233 2234 return 0; 2235 } 2236 2237 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2238 { 2239 struct pci_dev *pdev = hdev->pdev; 2240 int ret = 0; 2241 2242 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2243 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2244 hclgevf_misc_irq_uninit(hdev); 2245 hclgevf_uninit_msi(hdev); 2246 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2247 } 2248 2249 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2250 pci_set_master(pdev); 2251 ret = hclgevf_init_msi(hdev); 2252 if (ret) { 2253 dev_err(&pdev->dev, 2254 "failed(%d) to init MSI/MSI-X\n", ret); 2255 return ret; 2256 } 2257 2258 ret = hclgevf_misc_irq_init(hdev); 2259 if (ret) { 2260 hclgevf_uninit_msi(hdev); 2261 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2262 ret); 2263 return ret; 2264 } 2265 2266 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2267 } 2268 2269 return ret; 2270 } 2271 2272 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2273 { 2274 struct pci_dev *pdev = hdev->pdev; 2275 int ret; 2276 2277 ret = hclgevf_pci_reset(hdev); 2278 if (ret) { 2279 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2280 return ret; 2281 } 2282 2283 ret = hclgevf_cmd_init(hdev); 2284 if (ret) { 2285 dev_err(&pdev->dev, "cmd failed %d\n", ret); 2286 return ret; 2287 } 2288 2289 ret = hclgevf_rss_init_hw(hdev); 2290 if (ret) { 2291 dev_err(&hdev->pdev->dev, 2292 "failed(%d) to initialize RSS\n", ret); 2293 return ret; 2294 } 2295 2296 ret = hclgevf_config_gro(hdev, true); 2297 if (ret) 2298 return ret; 2299 2300 ret = hclgevf_init_vlan_config(hdev); 2301 if (ret) { 2302 dev_err(&hdev->pdev->dev, 2303 "failed(%d) to initialize VLAN config\n", ret); 2304 return ret; 2305 } 2306 2307 dev_info(&hdev->pdev->dev, "Reset done\n"); 2308 2309 return 0; 2310 } 2311 2312 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 2313 { 2314 struct pci_dev *pdev = hdev->pdev; 2315 int ret; 2316 2317 ret = hclgevf_pci_init(hdev); 2318 if (ret) { 2319 dev_err(&pdev->dev, "PCI initialization failed\n"); 2320 return ret; 2321 } 2322 2323 ret = hclgevf_cmd_queue_init(hdev); 2324 if (ret) { 2325 dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret); 2326 goto err_cmd_queue_init; 2327 } 2328 2329 ret = hclgevf_cmd_init(hdev); 2330 if (ret) 2331 goto err_cmd_init; 2332 2333 /* Get vf resource */ 2334 ret = hclgevf_query_vf_resource(hdev); 2335 if (ret) { 2336 dev_err(&hdev->pdev->dev, 2337 "Query vf status error, ret = %d.\n", ret); 2338 goto err_cmd_init; 2339 } 2340 2341 ret = hclgevf_init_msi(hdev); 2342 if (ret) { 2343 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 2344 goto err_cmd_init; 2345 } 2346 2347 hclgevf_state_init(hdev); 2348 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2349 2350 ret = hclgevf_misc_irq_init(hdev); 2351 if (ret) { 2352 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2353 ret); 2354 goto err_misc_irq_init; 2355 } 2356 2357 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2358 2359 ret = hclgevf_configure(hdev); 2360 if (ret) { 2361 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2362 goto err_config; 2363 } 2364 2365 ret = hclgevf_alloc_tqps(hdev); 2366 if (ret) { 2367 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2368 goto err_config; 2369 } 2370 2371 ret = 
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* vf is not allowed to enable unicast/multicast promisc mode.
	 * For revision 0x20, default to disable broadcast promisc mode,
	 * firmware makes sure broadcast packets can be accepted.
	 * For revision 0x21, default to enable broadcast promisc mode.
	 */
	ret = hclgevf_set_promisc_mode(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	hdev = ae_dev->priv;
	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->reset_count;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}
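
/* Each register block in the dump below is padded with SEPARATOR_VALUE
 * words so that it ends on a REG_LEN_PER_LINE boundary, which is why
 * hclgevf_get_regs_len() reserves one extra line per block. The ring
 * registers are repeated for every TQP (0x200 apart) and the TQP
 * interrupt registers for every non-misc vector (4 bytes apart).
 */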
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_um, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetching per-VF registers values from VF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);