// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (u8 *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}
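
/* Note on the pattern used by hclgevf_get_tc_info() above and the
 * getters below: the VF sends a HCLGE_MBX_GET_* mailbox request via
 * hclgevf_send_mbx_msg(), asks for a synchronous response, and then
 * unpacks the PF's reply field by field from the response buffer.
 */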

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2], resp_data[2];
	u16 qid_in_pf = 0;
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
				   2, true, resp_data, 2);
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
				 i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}
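
/* Link state changes are fanned out to both clients below: the NIC
 * client is always notified, and a RoCE client is notified as well if
 * one is registered and implements link_status_change(). A device
 * marked HCLGEVF_STATE_DOWN always reports link down.
 */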

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1
	u8 send_msg;
	u8 resp_msg;

	send_msg = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
			     sizeof(u8), false, &resp_msg, sizeof(u8));
	send_msg = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
			     sizeof(u8), false, &resp_msg, sizeof(u8));
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
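
/* The RSS hash key is longer than one command descriptor can carry, so
 * hclgevf_set_rss_algo_key() below programs it in
 * HCLGEVF_RSS_HASH_KEY_NUM-byte chunks, encoding the chunk index in
 * hash_config; the third descriptor carries whatever remainder of
 * HCLGEVF_RSS_KEY_SIZE is left (with the usual 40-byte key and 16-byte
 * chunks, that last chunk is 8 bytes).
 */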

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size = HCLGEVF_RSS_KEY_SIZE -
				   HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* update the shadow RSS key with the user
			 * specified key
			 */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}
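
/* Example of the mapping above: a request such as
 *	ethtool -N <dev> rx-flow-hash tcp4 sdfn
 * reaches the driver as RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3 and is translated to HCLGEVF_S_IP_BIT |
 * HCLGEVF_D_IP_BIT | HCLGEVF_S_PORT_BIT | HCLGEVF_D_PORT_BIT; SCTP
 * flow types additionally set HCLGEVF_V_TAG_BIT.
 */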

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
				    HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
			   HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
			   HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}
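
/* Layout of the mailbox message built above: msg[0] carries the
 * map/unmap opcode, msg[1] the vector id and msg[2] the number of ring
 * nodes in this message; each node then contributes a (ring type, tqp
 * index, GL index) triplet starting at offset
 * HCLGE_MBX_RING_MAP_BASIC_MSG_NUM. Chains longer than one command can
 * hold are simply split across several mailbox commands.
 */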

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret = %d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_bc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_bc_pmc ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
{
	return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			     HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN	5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}
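
/* The 5-byte VLAN filter message above packs: byte 0 the is_kill flag,
 * bytes 1-2 the VLAN id (copied in host byte order) and bytes 3-4 the
 * protocol exactly as received from the stack (big endian).
 */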

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev,
			"flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* We will wait a bit longer to let the reset of the stack complete.
	 * This may happen when the reset assertion was made by the PF. Yes,
	 * this also means we may end up waiting a bit longer even for a VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	/* When VF reset failed, only the higher level reset asserted by PF
	 * can restore it, so re-initialize the command queue to receive
	 * this higher reset event.
	 */
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");

	return ret;
}
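
/* The helper below returns pending reset levels in strict priority
 * order: HNAE3_VF_RESET first, then HNAE3_VF_FULL_RESET,
 * HNAE3_VF_PF_FUNC_RESET, HNAE3_VF_FUNC_RESET and finally
 * HNAE3_FLR_RESET. Selecting a level also clears the lower levels it
 * implies, so each pass services only the highest pending reset.
 */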

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* We could be here when either of the below happens:
		 * 1. reset was initiated due to a watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which made the watchdog react and induce a VF reset.
		 *       This also means our cmdq would be unreliable.
		 *    b. a problem in TX due to some other lower layer (for
		 *       example, the link layer not functioning properly).
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: there is no clearer way to detect the above cases
		 * than to react to the PF's response to this reset request.
		 * PF will ack cases 1b and 2, but we will get no intimation
		 * about 1a from PF, as cmdq would be in an unreliable state,
		 * i.e. mailbox communication between PF and VF would be
		 * broken.
		 */

		/* If we never get into the pending state it means either:
		 * 1. PF is not receiving our request, which could be due to
		 *    an IMP reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try to
		 * reset our PCIe + stack and see if it alleviates the
		 * problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(u8));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* Request the link status from the PF. The PF will be able to tell
	 * the VF about such updates in the future, so we might remove this
	 * later.
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}
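
/* The misc IRQ below follows a mask/demux/ack/unmask flow: vector 0 is
 * masked on entry, the event source decoded (a reset event takes
 * precedence over a mailbox event), the handler run, and the consumed
 * source bits written back before unmasking. Unknown events leave the
 * cause register untouched and the vector masked.
 */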

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
			HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
			HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
			HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
			HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
			HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
			HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
			HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
			HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other VLAN config (such as VLAN TX/RX offload) will also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclgevf_reset_tqp(handle, i);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = alive ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
				    0, &msg_data, 1, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
	return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);

	del_timer_sync(&hdev->keep_alive_timer);
	cancel_work_sync(&hdev->keep_alive_task);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;

        devm_kfree(&pdev->dev, hdev->vector_status);
        devm_kfree(&pdev->dev, hdev->vector_irq);
        pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
        int ret = 0;

        hclgevf_get_misc_vector(hdev);

        ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
                          0, "hclgevf_cmd", hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
                        hdev->misc_vector.vector_irq);
                return ret;
        }

        hclgevf_clear_event_cause(hdev, 0);

        /* enable misc vector (vector 0) */
        hclgevf_enable_vector(&hdev->misc_vector, true);

        return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
        /* disable misc vector (vector 0) */
        hclgevf_enable_vector(&hdev->misc_vector, false);
        synchronize_irq(hdev->misc_vector.vector_irq);
        free_irq(hdev->misc_vector.vector_irq, hdev);
        hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
                                        struct hnae3_ae_dev *ae_dev)
{
        struct hclgevf_dev *hdev = ae_dev->priv;
        int ret;

        switch (client->type) {
        case HNAE3_CLIENT_KNIC:
                hdev->nic_client = client;
                hdev->nic.client = client;

                ret = client->ops->init_instance(&hdev->nic);
                if (ret)
                        goto clear_nic;

                hnae3_set_client_init_flag(client, ae_dev, 1);

                if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
                        struct hnae3_client *rc = hdev->roce_client;

                        ret = hclgevf_init_roce_base_info(hdev);
                        if (ret)
                                goto clear_roce;
                        ret = rc->ops->init_instance(&hdev->roce);
                        if (ret)
                                goto clear_roce;

                        hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
                                                   1);
                }
                break;
        case HNAE3_CLIENT_UNIC:
                hdev->nic_client = client;
                hdev->nic.client = client;

                ret = client->ops->init_instance(&hdev->nic);
                if (ret)
                        goto clear_nic;

                hnae3_set_client_init_flag(client, ae_dev, 1);
                break;
        case HNAE3_CLIENT_ROCE:
                if (hnae3_dev_roce_supported(hdev)) {
                        hdev->roce_client = client;
                        hdev->roce.client = client;
                }

                if (hdev->roce_client && hdev->nic_client) {
                        ret = hclgevf_init_roce_base_info(hdev);
                        if (ret)
                                goto clear_roce;

                        ret = client->ops->init_instance(&hdev->roce);
                        if (ret)
                                goto clear_roce;
                }

                hnae3_set_client_init_flag(client, ae_dev, 1);
                break;
        default:
                return -EINVAL;
        }

        return 0;

clear_nic:
        hdev->nic_client = NULL;
        hdev->nic.client = NULL;
        return ret;
clear_roce:
        hdev->roce_client = NULL;
        hdev->roce.client = NULL;
        return ret;
}
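/* tear the RoCE client down first (it depends on the NIC client), then
 * the NIC/UNIC client itself, unless this call was made on behalf of
 * the RoCE client
 */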
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
                                           struct hnae3_ae_dev *ae_dev)
{
        struct hclgevf_dev *hdev = ae_dev->priv;

        /* un-init roce, if it exists */
        if (hdev->roce_client) {
                hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
                hdev->roce_client = NULL;
                hdev->roce.client = NULL;
        }

        /* un-init nic/unic, if this was not called by roce client */
        if (client->ops->uninit_instance && hdev->nic_client &&
            client->type != HNAE3_CLIENT_ROCE) {
                client->ops->uninit_instance(&hdev->nic, 0);
                hdev->nic_client = NULL;
                hdev->nic.client = NULL;
        }
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        struct hclgevf_hw *hw;
        int ret;

        ret = pci_enable_device(pdev);
        if (ret) {
                dev_err(&pdev->dev, "failed to enable PCI device\n");
                return ret;
        }

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
                goto err_disable_device;
        }

        ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
        if (ret) {
                dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
                goto err_disable_device;
        }

        pci_set_master(pdev);
        hw = &hdev->hw;
        hw->hdev = hdev;
        hw->io_base = pci_iomap(pdev, 2, 0);
        if (!hw->io_base) {
                dev_err(&pdev->dev, "can't map configuration register space\n");
                ret = -ENOMEM;
                goto err_clr_master;
        }

        return 0;

err_clr_master:
        pci_clear_master(pdev);
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);

        return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;

        pci_iounmap(pdev, hdev->hw.io_base);
        pci_clear_master(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}
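/* ask the firmware how many MSI-X vectors this VF owns; when RoCE is
 * supported the NIC vectors sit below the fixed RoCE offset, so the
 * total is the RoCE vector count plus that offset
 */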
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
        struct hclgevf_query_res_cmd *req;
        struct hclgevf_desc desc;
        int ret;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query vf resource failed, ret = %d.\n", ret);
                return ret;
        }

        req = (struct hclgevf_query_res_cmd *)desc.data;

        if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
                hdev->roce_base_msix_offset =
                hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
                                HCLGEVF_MSIX_OFT_ROCEE_M,
                                HCLGEVF_MSIX_OFT_ROCEE_S);
                hdev->num_roce_msix =
                hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

                /* The VF should have both NIC vectors and RoCE vectors, and
                 * the NIC vectors are queued before the RoCE vectors. The
                 * offset is fixed to 64.
                 */
                hdev->num_msi = hdev->num_roce_msix +
                                hdev->roce_base_msix_offset;
        } else {
                hdev->num_msi =
                hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
        }

        return 0;
}

static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        int ret = 0;

        if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
            test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
                hclgevf_misc_irq_uninit(hdev);
                hclgevf_uninit_msi(hdev);
                clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
        }

        if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
                pci_set_master(pdev);
                ret = hclgevf_init_msi(hdev);
                if (ret) {
                        dev_err(&pdev->dev,
                                "failed(%d) to init MSI/MSI-X\n", ret);
                        return ret;
                }

                ret = hclgevf_misc_irq_init(hdev);
                if (ret) {
                        hclgevf_uninit_msi(hdev);
                        dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
                                ret);
                        return ret;
                }

                set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
        }

        return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        int ret;

        ret = hclgevf_pci_reset(hdev);
        if (ret) {
                dev_err(&pdev->dev, "pci reset failed %d\n", ret);
                return ret;
        }

        ret = hclgevf_cmd_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "cmd init failed %d\n", ret);
                return ret;
        }

        ret = hclgevf_rss_init_hw(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to initialize RSS\n", ret);
                return ret;
        }

        ret = hclgevf_config_gro(hdev, true);
        if (ret)
                return ret;

        ret = hclgevf_init_vlan_config(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to initialize VLAN config\n", ret);
                return ret;
        }

        dev_info(&hdev->pdev->dev, "Reset done\n");

        return 0;
}
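/* one-time hdev bring-up: PCI, command queue, vectors, state machinery
 * and default hardware configuration, unwound in reverse order on
 * failure
 */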
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        int ret;

        ret = hclgevf_pci_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "PCI initialization failed\n");
                return ret;
        }

        ret = hclgevf_cmd_queue_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
                goto err_cmd_queue_init;
        }

        ret = hclgevf_cmd_init(hdev);
        if (ret)
                goto err_cmd_init;

        /* Get vf resource */
        ret = hclgevf_query_vf_resource(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Query vf resource error, ret = %d.\n", ret);
                goto err_cmd_init;
        }

        ret = hclgevf_init_msi(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
                goto err_cmd_init;
        }

        hclgevf_state_init(hdev);
        hdev->reset_level = HNAE3_VF_FUNC_RESET;

        ret = hclgevf_misc_irq_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
                        ret);
                goto err_misc_irq_init;
        }

        set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

        ret = hclgevf_configure(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
                goto err_config;
        }

        ret = hclgevf_alloc_tqps(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
                goto err_config;
        }

        ret = hclgevf_set_handle_info(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
                goto err_config;
        }

        ret = hclgevf_config_gro(hdev, true);
        if (ret)
                goto err_config;

        /* The VF is not allowed to enable unicast/multicast promisc mode.
         * For revision 0x20, broadcast promisc mode is disabled by default;
         * the firmware makes sure broadcast packets can still be accepted.
         * For revision 0x21, broadcast promisc mode is enabled by default.
         */
        ret = hclgevf_set_promisc_mode(hdev, true);
        if (ret)
                goto err_config;

        /* Initialize RSS for this VF */
        ret = hclgevf_rss_init_hw(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to initialize RSS\n", ret);
                goto err_config;
        }

        ret = hclgevf_init_vlan_config(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to initialize VLAN config\n", ret);
                goto err_config;
        }

        hdev->last_reset_time = jiffies;
        pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

        return 0;

err_config:
        hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
        hclgevf_state_uninit(hdev);
        hclgevf_uninit_msi(hdev);
err_cmd_init:
        hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
        hclgevf_pci_uninit(hdev);
        clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
        return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
        hclgevf_state_uninit(hdev);

        if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
                hclgevf_misc_irq_uninit(hdev);
                hclgevf_uninit_msi(hdev);
        }

        hclgevf_pci_uninit(hdev);
        hclgevf_cmd_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct pci_dev *pdev = ae_dev->pdev;
        struct hclgevf_dev *hdev;
        int ret;

        ret = hclgevf_alloc_hdev(ae_dev);
        if (ret) {
                dev_err(&pdev->dev, "hclge device allocation failed\n");
                return ret;
        }

        ret = hclgevf_init_hdev(ae_dev->priv);
        if (ret) {
                dev_err(&pdev->dev, "hclge device initialization failed\n");
                return ret;
        }

        hdev = ae_dev->priv;
        timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
        INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

        return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct hclgevf_dev *hdev = ae_dev->priv;

        hclgevf_uninit_hdev(hdev);
        ae_dev->priv = NULL;
}
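/* the largest "combined" channel count we can report: bounded both by
 * the RSS size the PF granted us and by the TQPs available per TC
 */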
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        struct hnae3_knic_private_info *kinfo = &nic->kinfo;

        return min_t(u32, hdev->rss_size_max,
                     hdev->num_tqps / kinfo->num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
                                 struct ethtool_channels *ch)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        ch->max_combined = hclgevf_get_max_channels(hdev);
        ch->other_count = 0;
        ch->max_other = 0;
        ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
                                          u16 *alloc_tqps, u16 *max_rss_size)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        *alloc_tqps = hdev->num_tqps;
        *max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
                                            u8 *auto_neg, u32 *speed,
                                            u8 *duplex)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        if (speed)
                *speed = hdev->hw.mac.speed;
        if (duplex)
                *duplex = hdev->hw.mac.duplex;
        if (auto_neg)
                *auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
                                 u8 duplex)
{
        hdev->hw.mac.speed = speed;
        hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
                                   u8 *media_type)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        if (media_type)
                *media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hdev->reset_count;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
                                  unsigned long *supported,
                                  unsigned long *advertising)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        *supported = hdev->hw.mac.supported;
        *advertising = hdev->hw.mac.advertising;
}

#define MAX_SEPARATE_NUM        4
#define SEPARATOR_VALUE         0xFFFFFFFF
#define REG_NUM_PER_LINE        4
#define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
        int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
        common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
        ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
        tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

        return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
                tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}
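/* dump layout matches hclgevf_get_regs_len(): command-queue and common
 * registers first, then one block of ring registers per TQP (0x200
 * apart) and one block of interrupt-control registers per TQP vector
 * (4 bytes apart), each block padded with SEPARATOR_VALUE words
 */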
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
                             void *data)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int i, j, reg_um, separator_num;
        u32 *reg = data;

        *version = hdev->fw_version;

        /* fetching per-VF registers values from VF PCIe register space */
        reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
        separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
        for (i = 0; i < reg_um; i++)
                *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
        for (i = 0; i < separator_num; i++)
                *reg++ = SEPARATOR_VALUE;

        reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
        separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
        for (i = 0; i < reg_um; i++)
                *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
        for (i = 0; i < separator_num; i++)
                *reg++ = SEPARATOR_VALUE;

        reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
        separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
        for (j = 0; j < hdev->num_tqps; j++) {
                for (i = 0; i < reg_um; i++)
                        *reg++ = hclgevf_read_dev(&hdev->hw,
                                                  ring_reg_addr_list[i] +
                                                  0x200 * j);
                for (i = 0; i < separator_num; i++)
                        *reg++ = SEPARATOR_VALUE;
        }

        reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
        separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
        for (j = 0; j < hdev->num_msi_used - 1; j++) {
                for (i = 0; i < reg_um; i++)
                        *reg++ = hclgevf_read_dev(&hdev->hw,
                                                  tqp_intr_reg_addr_list[i] +
                                                  4 * j);
                for (i = 0; i < separator_num; i++)
                        *reg++ = SEPARATOR_VALUE;
        }
}
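/* glue between the common HNAE3 framework and this VF driver: every
 * callback the framework may invoke on a hclgevf device is wired up here
 */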
static const struct hnae3_ae_ops hclgevf_ops = {
        .init_ae_dev = hclgevf_init_ae_dev,
        .uninit_ae_dev = hclgevf_uninit_ae_dev,
        .flr_prepare = hclgevf_flr_prepare,
        .flr_done = hclgevf_flr_done,
        .init_client_instance = hclgevf_init_client_instance,
        .uninit_client_instance = hclgevf_uninit_client_instance,
        .start = hclgevf_ae_start,
        .stop = hclgevf_ae_stop,
        .client_start = hclgevf_client_start,
        .client_stop = hclgevf_client_stop,
        .map_ring_to_vector = hclgevf_map_ring_to_vector,
        .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
        .get_vector = hclgevf_get_vector,
        .put_vector = hclgevf_put_vector,
        .reset_queue = hclgevf_reset_tqp,
        .get_mac_addr = hclgevf_get_mac_addr,
        .set_mac_addr = hclgevf_set_mac_addr,
        .add_uc_addr = hclgevf_add_uc_addr,
        .rm_uc_addr = hclgevf_rm_uc_addr,
        .add_mc_addr = hclgevf_add_mc_addr,
        .rm_mc_addr = hclgevf_rm_mc_addr,
        .get_stats = hclgevf_get_stats,
        .update_stats = hclgevf_update_stats,
        .get_strings = hclgevf_get_strings,
        .get_sset_count = hclgevf_get_sset_count,
        .get_rss_key_size = hclgevf_get_rss_key_size,
        .get_rss_indir_size = hclgevf_get_rss_indir_size,
        .get_rss = hclgevf_get_rss,
        .set_rss = hclgevf_set_rss,
        .get_rss_tuple = hclgevf_get_rss_tuple,
        .set_rss_tuple = hclgevf_set_rss_tuple,
        .get_tc_size = hclgevf_get_tc_size,
        .get_fw_version = hclgevf_get_fw_version,
        .set_vlan_filter = hclgevf_set_vlan_filter,
        .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
        .reset_event = hclgevf_reset_event,
        .set_default_reset_request = hclgevf_set_def_reset_request,
        .get_channels = hclgevf_get_channels,
        .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
        .get_regs_len = hclgevf_get_regs_len,
        .get_regs = hclgevf_get_regs,
        .get_status = hclgevf_get_status,
        .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
        .get_media_type = hclgevf_get_media_type,
        .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
        .ae_dev_resetting = hclgevf_ae_dev_resetting,
        .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
        .set_gro_en = hclgevf_gro_en,
        .set_mtu = hclgevf_set_mtu,
        .get_global_queue_id = hclgevf_get_qid_global,
        .set_timer_task = hclgevf_set_timer_task,
        .get_link_mode = hclgevf_get_link_mode,
};

static struct hnae3_ae_algo ae_algovf = {
        .ops = &hclgevf_ops,
        .pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
        pr_info("%s is initializing\n", HCLGEVF_NAME);

        hnae3_register_ae_algo(&ae_algovf);

        return 0;
}

static void hclgevf_exit(void)
{
        hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);