// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}
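
/* the PF answers HCLGE_MBX_GET_QINFO with four u16 fields packed back to
 * back: num_tqps at bytes 0-1, rss_size_max at 2-3, num_desc at 4-5 and
 * rx_buf_len at 6-7; the memcpy() calls below unpack them at those fixed
 * offsets
 */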
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2], resp_data[2];
	u16 qid_in_pf = 0;
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
				   2, true, resp_data, 2);
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
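
	/* notify the client(s) only on an actual link state transition */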
	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size = HCLGEVF_RSS_KEY_SIZE -
				   HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}
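
/* like the hash key above (pushed in three HCLGEVF_RSS_HASH_KEY_NUM-sized
 * chunks), the indirection table is programmed via multiple commands:
 * hclgevf_set_rss_indir_table() below sends HCLGEVF_RSS_CFG_TBL_NUM
 * commands of HCLGEVF_RSS_CFG_TBL_SIZE entries each, tagging every command
 * with its starting table index
 */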
static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
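			/* translate the ethtool hash function into the
			 * device's algorithm id; ETH_RSS_HASH_NO_CHANGE
			 * keeps whatever is currently configured
			 */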
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* update the shadow RSS key with the user
			 * specified key
			 */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}
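
	/* command accepted: mirror the new tuple configuration into the
	 * shadow copy that hclgevf_get_rss_tuple() reports back to ethtool
	 */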
	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
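
	/* each mailbox message holds up to (HCLGE_MBX_VF_MSG_DATA_NUM -
	 * HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / HCLGE_MBX_RING_NODE_VARIABLE_NUM
	 * ring nodes, so long ring chains are flushed to the PF in batches,
	 * with msg[2] carrying the node count of each batch
	 */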
	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
			   HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
			  HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector = %d, ret = %d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_bc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_bc_pmc ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
{
	return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;
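
	/* the payload built above carries the new MAC address in the first
	 * ETH_ALEN bytes and the currently programmed one right after it
	 */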
	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before sending queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev,
			"flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by PF. Yes,
	 * this also means we might end up waiting a bit more even for VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	/* When VF reset failed, only the higher level reset asserted by PF
	 * can restore it, so re-initialize the command queue to receive
	 * this higher reset event.
	 */
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");

	return ret;
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
				!= HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of PF for this reset request.
		 * PF will ack the 1b and 2 cases but we will not get any
		 * intimation about 1a from PF as cmdq would be in unreliable
		 * state i.e. mailbox communication between PF and VF would
		 * be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try
		 * to reset our PCIe + stack and see if it alleviates the
		 * problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(u8));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to send keep alive cmd\n", ret);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in the future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}
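
/* stop path: mark the device DOWN so the periodic service task stops being
 * scheduled, reset every TQP owned by this handle, clear the per-queue
 * stats and finally report link down to the clients
 */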
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclgevf_reset_tqp(handle, i);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = alive ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
				    0, &msg_data, 1, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
	return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);

	del_timer_sync(&hdev->keep_alive_timer);
	cancel_work_sync(&hdev->keep_alive_task);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
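
	/* per-vector bookkeeping: vector_status tracks ownership (entries
	 * initialized to HCLGEVF_INVALID_VPORT mean "free") and vector_irq
	 * caches the Linux IRQ number of each MSI/MSI-X entry
	 */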
	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}
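
/* hclgevf_uninit_client_instance - detach a client from the ae device.
 * The RoCE instance, if any, is released before the NIC/UNIC instance.
 */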
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and RoCE vectors, and the NIC
		 * vectors are queued before the RoCE vectors. The offset is
		 * fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd init failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf resource error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* The VF is not allowed to enable unicast/multicast promisc mode.
	 * For revision 0x20, default to disable broadcast promisc mode;
	 * the firmware makes sure broadcast packets can still be accepted.
	 * For revision 0x21, default to enable broadcast promisc mode.
	 */
	ret = hclgevf_set_promisc_mode(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	hdev = ae_dev->priv;
	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->reset_count;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_um, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetching per-VF register values from VF PCIe register space */
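	/* Each register block below is padded with SEPARATOR_VALUE words up
	 * to a REG_NUM_PER_LINE boundary, so the dump matches the length
	 * reported by hclgevf_get_regs_len().
	 */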
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);