// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2], resp_data[2];
	u16 qid_in_pf = 0;
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
				   2, true, resp_data, 2);
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	/* the hash key is written in three chunks of HCLGEVF_RSS_HASH_KEY_NUM
	 * bytes; the last chunk carries whatever remains of the key
	 */
	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}
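
/* number of entries in the VF RSS indirection table */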
static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
					HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		/* send the mailbox message whenever it is full or the ring
		 * chain has ended
		 */
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_bc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_bc_pmc ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
{
	return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev,
			"flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete. This
	 * might happen in case reset assertion was made by PF. Yes, this also
	 * means we might end up waiting a bit more even for VF reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	/* When VF reset failed, only the higher level reset asserted by PF
	 * can restore it, so re-initialize the command queue to receive
	 * this higher reset event.
	 */
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");

	return ret;
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect above cases than to
		 * react to the response of PF for this reset request. PF will
		 * ack the 1b and 2. cases but we will not get any intimation
		 * about 1a from PF as cmdq would be in unreliable state i.e.
		 * mailbox communication between PF and VF would be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(u8));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}
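
/* Stop the datapath: mark the VF down, reset every TQP along with its
 * statistics, and report link down to the attached client.
 */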
1879 static void hclgevf_ae_stop(struct hnae3_handle *handle) 1880 { 1881 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1882 int i; 1883 1884 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1885 1886 for (i = 0; i < handle->kinfo.num_tqps; i++) 1887 hclgevf_reset_tqp(handle, i); 1888 1889 /* reset tqp stats */ 1890 hclgevf_reset_tqp_stats(handle); 1891 hclgevf_update_link_status(hdev, 0); 1892 } 1893 1894 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 1895 { 1896 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1897 u8 msg_data; 1898 1899 msg_data = alive ? 1 : 0; 1900 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE, 1901 0, &msg_data, 1, false, NULL, 0); 1902 } 1903 1904 static int hclgevf_client_start(struct hnae3_handle *handle) 1905 { 1906 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1907 1908 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 1909 return hclgevf_set_alive(handle, true); 1910 } 1911 1912 static void hclgevf_client_stop(struct hnae3_handle *handle) 1913 { 1914 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1915 int ret; 1916 1917 ret = hclgevf_set_alive(handle, false); 1918 if (ret) 1919 dev_warn(&hdev->pdev->dev, 1920 "%s failed %d\n", __func__, ret); 1921 1922 del_timer_sync(&hdev->keep_alive_timer); 1923 cancel_work_sync(&hdev->keep_alive_task); 1924 } 1925 1926 static void hclgevf_state_init(struct hclgevf_dev *hdev) 1927 { 1928 /* setup tasks for the MBX */ 1929 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); 1930 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1931 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1932 1933 /* setup tasks for service timer */ 1934 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); 1935 1936 INIT_WORK(&hdev->service_task, hclgevf_service_task); 1937 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1938 1939 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); 1940 1941 mutex_init(&hdev->mbx_resp.mbx_mutex); 1942 1943 /* bring the device down */ 1944 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1945 } 1946 1947 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 1948 { 1949 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1950 1951 if (hdev->service_timer.function) 1952 del_timer_sync(&hdev->service_timer); 1953 if (hdev->service_task.func) 1954 cancel_work_sync(&hdev->service_task); 1955 if (hdev->mbx_service_task.func) 1956 cancel_work_sync(&hdev->mbx_service_task); 1957 if (hdev->rst_service_task.func) 1958 cancel_work_sync(&hdev->rst_service_task); 1959 1960 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 1961 } 1962 1963 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 1964 { 1965 struct pci_dev *pdev = hdev->pdev; 1966 int vectors; 1967 int i; 1968 1969 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) 1970 vectors = pci_alloc_irq_vectors(pdev, 1971 hdev->roce_base_msix_offset + 1, 1972 hdev->num_msi, 1973 PCI_IRQ_MSIX); 1974 else 1975 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 1976 PCI_IRQ_MSI | PCI_IRQ_MSIX); 1977 1978 if (vectors < 0) { 1979 dev_err(&pdev->dev, 1980 "failed(%d) to allocate MSI/MSI-X vectors\n", 1981 vectors); 1982 return vectors; 1983 } 1984 if (vectors < hdev->num_msi) 1985 dev_warn(&hdev->pdev->dev, 1986 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 1987 hdev->num_msi, vectors); 1988 1989 hdev->num_msi = vectors; 1990 hdev->num_msi_left = vectors; 1991 hdev->base_msi_vector = pdev->irq; 1992 hdev->roce_base_vector = pdev->irq + 
static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc vector (vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector (vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}
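
/* Ordering note for hclgevf_init_client_instance() above: the RoCE
 * instance can only be brought up once both the NIC and the RoCE
 * clients have registered, so whichever client registers last is the
 * one that triggers hclgevf_init_roce_base_info() and the RoCE
 * init_instance() call. That is why both the KNIC and the ROCE cases
 * check for the peer client before initializing RoCE.
 */
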
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init RoCE, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by the RoCE client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* A VF has both NIC vectors and RoCE vectors; the NIC
		 * vectors are queued before the RoCE vectors. The offset
		 * is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}
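
/* Worked example for the vector accounting above, with illustrative
 * firmware-reported values: if roce_base_msix_offset = 64 and
 * num_roce_msix = 8, then num_msi = 64 + 8 = 72 - the first 64 vectors
 * serve the NIC (and misc) path and the last 8 serve RoCE.
 */
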
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd init failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}
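
/* Unlike the full hclgevf_init_hdev() path below, the reset path above
 * only redoes state that a reset may have destroyed: the IRQ setup
 * (for a full VF reset), the command queue, and the RSS/GRO/VLAN
 * hardware configuration. Software state such as timers, work tasks
 * and TQP allocations survives the reset untouched.
 */
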
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* get VF resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* A VF is not allowed to enable unicast/multicast promiscuous mode.
	 * On revision 0x20 hardware, broadcast promiscuous mode is disabled
	 * by default; the firmware makes sure broadcast packets can still be
	 * accepted. On revision 0x21 hardware, broadcast promiscuous mode is
	 * enabled by default.
	 */
	ret = hclgevf_set_promisc_mode(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}
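
/* The error labels above unwind strictly in reverse order of setup:
 * err_config releases the misc IRQ, err_misc_irq_init tears down the
 * state machinery and MSI vectors, err_cmd_init shuts down the command
 * queue, and err_cmd_queue_init releases the PCI resources; each label
 * falls through into the ones below it.
 */
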
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	hdev = ae_dev->priv;
	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. other_count
 * is the number of queues used for control, and max_combined is the
 * number of queue pairs we can support. Queue pairs may not map 1:1
 * onto q_vectors, since we support many more queue pairs than
 * q_vectors.
 */
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->reset_count;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}
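
/* Worked example of the length calculation above (sizes illustrative):
 * a list of 14 u32 register addresses occupies 56 bytes, so with
 * REG_LEN_PER_LINE = 16 it needs 56 / 16 + 1 = 4 lines (integer
 * division; the "+ 1" covers the partial final line plus the
 * SEPARATOR_VALUE padding added by hclgevf_get_regs() below). The ring
 * and TQP-interrupt line counts are then scaled by the number of TQPs
 * and of in-use data vectors (num_msi_used - 1, excluding the misc
 * vector) respectively.
 */
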
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_num, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetch per-VF register values from the VF PCIe register space */
	reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}
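
/* Layout note for hclgevf_get_regs() above: every block of register
 * values is padded with SEPARATOR_VALUE (0xffffffff) words out to a
 * REG_NUM_PER_LINE boundary (a full separator line when the count is
 * already aligned), presumably so a parser can delimit the cmdq,
 * common, per-ring and per-vector sections. The 0x200 and 4-byte
 * strides are the per-TQP and per-vector register spacings within the
 * VF's PCIe register space.
 */
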
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);
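
/* Module wiring summary: hclgevf_init() registers ae_algovf with the
 * hnae3 framework, which matches probed devices against
 * ae_algovf_pci_tbl and then drives this VF driver entirely through
 * the hclgevf_ops callbacks above.
 */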