// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}
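/*
 * Note: hclgevf_ae_get_hdev() above relies on the nic and roce handles
 * being embedded directly in struct hclgevf_dev, so container_of() can map
 * a handle back to its owning device. A handle without a client attached
 * is treated as the nic handle, which covers the early-init case.
 */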
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}
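/*
 * Most VF configuration below follows the same mailbox pattern: fill the
 * request with hclgevf_build_send_msg(), then call hclgevf_send_mbx_msg()
 * with need_resp=true when the PF's answer is copied back into a response
 * buffer, or need_resp=false for fire-and-forget notifications (e.g. link
 * status requests).
 */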
static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}
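/*
 * The GET_QINFO/GET_QDEPTH/GET_QID_IN_PF responses above are fixed-layout
 * byte arrays; u16 fields are copied out with memcpy() at the defined
 * offsets (e.g. bytes 0-1 of the QINFO response carry the allocated TQP
 * count, bytes 2-3 the max RSS size, bytes 4-5 the RX buffer length), so
 * no per-field parsing is needed.
 */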
static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after init the max rss_size and tqps, adjust the default tqp numbers
	 * and rss size with the actual vector numbers
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
				kinfo->rss_size);

	return 0;
}
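/*
 * Worked example for hclgevf_knic_setup() above (hypothetical numbers):
 * with hw_tc_map = 0xf (4 TCs), num_tqps = 16 and rss_size_max = 8,
 * rss_size becomes min(8, 16 / 4) = 4 and num_tqps stays at 4 * 4 = 16.
 * The final clamp against num_nic_msix - 1 then shrinks both values if
 * fewer vectors than queues are available.
 */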
static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclgevf_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}
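/*
 * hclgevf_set_rss_algo_key() above streams the hash key to the firmware in
 * HCLGEVF_RSS_HASH_KEY_NUM-byte chunks, one command descriptor per chunk,
 * with the chunk index encoded at HCLGEVF_RSS_HASH_KEY_OFFSET_B in
 * hash_config. Assuming the usual 40-byte key and 16-byte chunks, that is
 * three descriptors carrying 16 + 16 + 8 bytes.
 */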
static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	unsigned int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}
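/*
 * On the revision 0x20 path the RSS key lives in the PF and is fetched
 * over the mailbox in HCLGEVF_RSS_MBX_RESP_LEN (8) byte pieces: assuming
 * the usual 40-byte key, that is five round trips, with the final memcpy
 * sized to whatever remainder is left.
 */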
static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with the user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}
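/*
 * hclgevf_get_rss_hash_bits() maps the ethtool RXH_* flags onto the
 * device's per-flow-type tuple bits (HCLGEVF_S/D_PORT_BIT,
 * HCLGEVF_S/D_IP_BIT), with HCLGEVF_V_TAG_BIT added unconditionally for
 * SCTP flows; hclgevf_set_rss_tuple() then rewrites only the field for
 * the requested flow type and mirrors the result into the shadow
 * rss_tuple_sets on success.
 */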
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
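/*
 * The binding helper below batches a ring chain into mailbox messages:
 * each HCLGE_MBX_MAP_RING_TO_VECTOR/UNMAP request carries at most
 * HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM (ring_type, tqp_index, int_gl_index)
 * tuples, and a partially filled message is flushed when the end of the
 * chain is reached, so arbitrarily long chains become a series of
 * fixed-size requests.
 */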
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
			hnae3_get_field(node->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct pci_dev *pdev = hdev->pdev;
	bool en_bc_pmc;

	en_bc_pmc = pdev->revision != 0x20;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}
static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}
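/*
 * MAC address node state machine, as implemented by
 * hclgevf_update_mac_node() above:
 *
 *   current \ event | TO_ADD     | TO_DEL     | ACTIVE
 *   ----------------+------------+------------+----------
 *   TO_ADD          | no change  | node freed | ACTIVE
 *   TO_DEL          | ACTIVE     | no change  | no change
 *   ACTIVE          | no change  | TO_DEL     | n/a
 *
 * TO_ADD/TO_DEL events come from set_rx_mode, ACTIVE only from
 * tmp_add_list processing (an ACTIVE node never sees an ACTIVE event).
 */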
static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address is never added, unnecessary to delete */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %pM, state = %d, ret = %d\n",
				mac_node->mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}
static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means we received a TO_DEL request
		 * during the time window of sending the mac config request
		 * to the PF. If the mac_node state is ACTIVE, change its
		 * state to TO_DEL so it will be removed next time. If it is
		 * TO_ADD, the TO_ADD request failed, so just remove the mac
		 * node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * we received a new TO_ADD request during the time
			 * window of sending the mac addr configuration
			 * request to the PF, so just change the mac state to
			 * ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}
static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if adding/deleting some mac addresses failed, move them back to
	 * the mac_list and retry next time.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting, firmware is unable to handle the
	 * mailbox. Just record the vlan id, and remove it after the reset
	 * finishes.
	 */
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing the hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to stay consistent with
	 * the stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}
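/*
 * Layout of the HCLGE_MBX_VLAN_FILTER request built in
 * hclgevf_set_vlan_filter() above: byte 0 carries is_kill, bytes 1-2 the
 * vlan id and bytes 3-4 the (big-endian) protocol, matching the
 * HCLGEVF_VLAN_MBX_*_OFFSET defines. Failed deletions are parked in
 * vlan_del_fail_bmap and replayed by hclgevf_sync_vlan_filter(), at most
 * HCLGEVF_MAX_SYNC_COUNT entries per pass.
 */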
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by the PF.
	 * Yes, this also means we might end up waiting a bit more even
	 * for a VF reset.
	 */
	msleep(5000);

	return 0;
}
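/*
 * hclgevf_reset_wait() above polls the reset status register every
 * HCLGEVF_RESET_WAIT_US (20 ms) for up to HCLGEVF_RESET_WAIT_US *
 * HCLGEVF_RESET_WAIT_CNT = 20000 us * 2000 = 40 seconds before declaring
 * the hardware reset lost; the extra msleep(5000) afterwards gives a
 * PF-initiated stack reset time to settle.
 */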
static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	struct hclge_vf_to_pf_msg send_msg;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fail */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->rst_stats.rst_cnt++;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}
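/*
 * hclgevf_get_reset_level() above returns the highest-priority reset
 * pending in *addr (VF_RESET > VF_FULL_RESET > VF_PF_FUNC_RESET >
 * VF_FUNC_RESET > FLR_RESET) and clears that bit along with the
 * function-level reset bits it subsumes, so one pass through the reset
 * task handles the strongest request only once.
 */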
static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_RETRY_WAIT_MS	500
#define HCLGEVF_FLR_RETRY_CNT		5

	struct hclgevf_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

retry:
	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	hdev->reset_type = HNAE3_FLR_RESET;
	ret = hclgevf_reset_prepare(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
			ret);
		if (hdev->reset_pending ||
		    retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
			dev_err(&hdev->pdev->dev,
				"reset_pending:0x%lx, retry_cnt:%d\n",
				hdev->reset_pending, retry_cnt);
			clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
			up(&hdev->reset_sem);
			msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
			goto retry;
		}
	}

	/* disable misc vector before FLR done */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	hdev->rst_stats.flr_rst_cnt++;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	hclgevf_enable_vector(&hdev->misc_vector, true);

	ret = hclgevf_reset_rebuild(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
			 ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

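/* All three schedule helpers here funnel into the same delayed work item
 * (hdev->service_task) on the driver-private hclgevf_wq workqueue; the
 * RST/MBX "service scheduled" state bits tell the service task which
 * sub-task actually has work pending.
 */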
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}

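/* The reset service task distinguishes two flows: HCLGEVF_RESET_PENDING
 * (the PF/hardware has signalled a reset, so poll for completion and then
 * rebuild) and HCLGEVF_RESET_REQUESTED (this VF asked for a reset and is
 * waiting for it to turn into a pending one). Both run under reset_sem so
 * they cannot race with FLR prepare/done.
 */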
static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. a problem in TX due to some other lower layer (e.g.
		 *       the link layer not functioning properly).
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the PF's response to this reset request.
		 * PF will ack cases 1b and 2, but we will not get any
		 * intimation about 1a from PF as the cmdq would be in an
		 * unreliable state, i.e. mailbox communication between PF
		 * and VF would be broken.
		 *
		 * If we never get into the pending state it means either:
		 * 1. PF is not receiving our request, which could be due to
		 *    an IMP reset
		 * 2. PF is broken
		 * We cannot do much for case 2, but for case 1 we can first
		 * try resetting our PCIe + stack and see if that alleviates
		 * the problem.
		 */
		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		return;

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
		return;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF failed to send keep alive cmd(=%d)\n", ret);
}

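/* The periodic task self-paces to roughly one pass per second: if it runs
 * again within HZ jiffies of the previous pass (e.g. because a reset or
 * mailbox event rescheduled the shared work item early), it just re-arms
 * itself with the remaining delay instead of running the body.
 */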
static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);
	struct hnae3_handle *handle = &hdev->nic;

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
		hclgevf_keep_alive(hdev);

	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
		hclgevf_tqps_update_stats(handle);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hclgevf_sync_mac_table(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclgevf_task_schedule(hdev, delta);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
						service_task.work);

	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
	hclgevf_periodic_service_task(hdev);

	/* Handle reset and mbx again in case periodical task delays the
	 * handling by calling hclgevf_task_schedule() in
	 * hclgevf_periodic_service_task()
	 */
	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
}

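/* Vector 0 is shared by several event sources. hclgevf_check_evt_cause()
 * below demultiplexes the cmdq status register into reset interrupts,
 * mailbox (CMDQ RX) interrupts and "other" events, and computes the value
 * the caller must write back to clear the serviced source.
 */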
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 val, cmdq_stat_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
					 HCLGEVF_VECTOR0_CMDQ_STAT_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
		hdev->rst_stats.vf_rst_cnt++;
		/* set up VF hardware reset status; the PF will clear this
		 * status once its initialization is done.
		 */
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
				  val | HCLGEVF_VF_RST_ING_BIT);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		/* for revision 0x21, clearing interrupt is writing bit 0
		 * to the clear register, writing bit 1 means to keep the
		 * old value.
		 * for revision 0x20, the clear register is a read & write
		 * register, so we should just write 0 to the bit we are
		 * handling, and keep other bits as cmdq_stat_reg.
		 */
		if (hdev->pdev->revision >= 0x21)
			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		else
			*clearval = cmdq_stat_reg &
				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);

		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
		 cmdq_stat_reg);

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	ret = hclgevf_get_pf_media_type(hdev);
	if (ret)
		return ret;

	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

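/* GRO configuration is a no-op on hardware that does not advertise GRO
 * support (hnae3_dev_gro_supported()); otherwise it is pushed to firmware
 * through a single HCLGEVF_OPC_GRO_GENERIC_CONFIG command descriptor.
 */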
static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_tuple_cfg *tuple_sets;
	u32 i;

	rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
	tuple_sets = &rss_cfg->rss_tuple_sets;
	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	}

	/* Initialize RSS indirect table */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;

	if (hdev->pdev->revision >= 0x21) {
		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
{
#define HCLGEVF_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		hclgevf_task_schedule(hdev, 0);
	} else {
		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclgevf_flush_link_update(hdev);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

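/* Note for the stop path below: when the stop is driven by a VF reset the
 * per-queue TQP resets are skipped, since the whole function is about to be
 * reset anyway; the TQP stats are cleared and link is reported down in
 * either case.
 */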
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->reset_type != HNAE3_VF_RESET)
		for (i = 0; i < handle->kinfo.num_tqps; i++)
			if (hclgevf_reset_tqp(handle, i))
				break;

	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
#define HCLGEVF_STATE_ALIVE	1
#define HCLGEVF_STATE_NOT_ALIVE	0

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
				   HCLGEVF_STATE_NOT_ALIVE;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);
	sema_init(&hdev->reset_sem, 1);

	spin_lock_init(&hdev->mac_table.mac_list_lock);
	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

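/* MSI/MSI-X setup: when RoCE is supported the VF must get at least
 * roce_base_msix_offset + 1 vectors (NIC vectors are laid out before the
 * RoCE ones) and plain MSI is not acceptable; otherwise anything down to
 * HNAE3_MIN_VECTOR_NUM vectors, MSI or MSI-X, will do.
 */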
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_dev_roce_supported(hdev))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
						hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGEVF_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %u\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}

static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					    struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	ret = client->ops->init_instance(&hdev->nic);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->nic))
		hclgevf_info_show(hdev);

	return 0;
}

static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}

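/* Client instance bring-up is order-independent between the KNIC and RoCE
 * clients: hclgevf_init_roce_client_instance() bails out early (returning 0)
 * until both hdev->nic_client and hdev->roce_client are registered, so the
 * RoCE instance is initialized by whichever registration happens last.
 */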
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = hclgevf_init_nic_client_instance(ae_dev, client);
		if (ret)
			goto clear_nic;

		ret = hclgevf_init_roce_client_instance(ae_dev,
							hdev->roce_client);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		ret = hclgevf_init_roce_client_instance(ae_dev, client);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

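/* hclgevf_query_vf_resource() asks firmware for this VF's interrupt budget.
 * On RoCE-capable devices the total MSI count is the RoCE vector count plus
 * the fixed NIC/RoCE split offset reported by firmware; NIC-only devices
 * simply use the reported vector count.
 */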
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* The NIC's MSI-X vector count always equals the RoCE's. */
		hdev->num_nic_msix = hdev->num_roce_msix;

		/* VF should have both NIC vectors and RoCE vectors, with NIC
		 * vectors queued before RoCE vectors. The offset is fixed to
		 * 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		hdev->num_nic_msix = hdev->num_msi;
	}

	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for vf(min:2).\n",
			hdev->num_nic_msix);
		return -EINVAL;
	}

	return 0;
}

static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

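/* Probe-time initialization. The error labels below unwind in reverse
 * order of the setup steps; a failure at any point falls through the
 * remaining labels so every already-initialized resource is released.
 */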
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret)
		return ret;

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret)
		goto err_cmd_queue_init;

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;
	hdev->reset_type = HNAE3_NONE_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret)
		goto err_misc_irq_init;

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret)
		goto err_config;

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	hclgevf_rss_init_cfg(hdev);
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
		 HCLGEVF_DRIVER_NAME);

	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_state_uninit(hdev);

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_mac_list(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static void hclgevf_update_rss_size(struct hnae3_handle *handle,
				    u32 new_tqps_num)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u16 max_rss_size;

	kinfo->req_rss_size = new_tqps_num;

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     hdev->num_tqps / kinfo->num_tc);

	/* Use the user's configuration when it is not larger than
	 * max_rss_size, otherwise, use the maximum specification value.
	 */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size)
		kinfo->rss_size = kinfo->req_rss_size;
	else if (kinfo->rss_size > max_rss_size ||
		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
		kinfo->rss_size = max_rss_size;

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
}

static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
				bool rxfh_configured)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	hclgevf_update_rss_size(handle, new_tqps_num);

	ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	hdev->rss_cfg.rss_size = kinfo->rss_size;

	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				   u8 *module_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

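/* The register dump mirrors the layout advertised by hclgevf_get_regs_len():
 * each block of register values is padded with SEPARATOR_VALUE words, ring
 * registers are repeated per TQP (stride 0x200) and TQP interrupt registers
 * per in-use vector (stride 4).
 */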
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_um, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetching per-VF register values from VF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();

	/* send msg to PF and wait update port based vlan info */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan_info, data_size);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

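/* Operations exposed to the HNAE3 framework; this table is what ties the
 * VF driver into the common hns3 enet layer via ae_algovf below.
 */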
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.set_channels = hclgevf_set_channels,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
	.set_promisc_mode = hclgevf_set_promisc_mode,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
	if (!hclgevf_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
	destroy_workqueue(hclgevf_wq);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);