// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}
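/* Per-queue packet counters live in firmware: each TQP is queried with a
 * QUERY_RX_STATUS and a QUERY_TX_STATUS descriptor, and the returned
 * values are accumulated into the software tqp_stats shadow.
 */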
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}
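/* The PF queries below all follow the same mailbox pattern: build a
 * message with hclgevf_build_send_msg(), send it synchronously to the
 * PF, and decode the fixed-layout response buffer.
 */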
static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}
static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after init the max rss_size and tqps, adjust the default tqp numbers
	 * and rss size with the actual vector numbers
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
				kinfo->rss_size);

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}
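/* The LINK_UPDATING bit serializes link updates between the mailbox
 * handler and the service task; while the VF is administratively DOWN
 * the reported state is forced to link-down.
 */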
void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
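/* The RSS hash key (HCLGEVF_RSS_KEY_SIZE bytes) does not fit in a single
 * command descriptor, so it is programmed in HCLGEVF_RSS_HASH_KEY_NUM-byte
 * chunks, with the chunk index carried in the hash_config field of each
 * descriptor.
 */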
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclgevf_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	unsigned int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}
/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}
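/* Translate the ethtool RXH_* flags into the device's per-flow tuple
 * bits; for SCTP flows the HCLGEVF_V_TAG_BIT (presumably the SCTP
 * verification tag) is also hashed.
 */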
static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
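/* Ring-to-vector (un)mapping is performed by the PF on the VF's behalf.
 * A long ring chain is split across several mailbox messages, each
 * carrying at most HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM ring parameters.
 */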
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
			hnae3_get_field(node->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}
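/* The VF cannot program promiscuous mode directly; it reports the desired
 * unicast/multicast/broadcast enables to the PF over the mailbox.
 */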
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct pci_dev *pdev = hdev->pdev;
	bool en_bc_pmc;

	en_bc_pmc = pdev->revision != 0x20;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
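/* A MAC change request carries both the new and the current address so
 * the PF can check the change against what it has on record; a first-time
 * set with no PF-provided MAC sends a zeroed old address instead.
 */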
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}
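/* MAC filter bookkeeping is a small per-address state machine: entries
 * move between TO_ADD, ACTIVE and TO_DEL under mac_list_lock, and the
 * periodic sync task replays pending TO_ADD/TO_DEL entries to the PF.
 */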
static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address is never added, unnecessary to delete */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %pM, state = %d, ret = %d\n",
				mac_node->mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}
static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means we received a TO_DEL request
		 * during the time window of sending the mac config request
		 * to the PF. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it gets removed at the next sync. If it is
		 * TO_ADD, the TO_ADD request failed, so just remove the
		 * mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * a new TO_ADD request was received during the time
			 * window of sending the mac addr configuration
			 * request to the PF, so just change the mac state
			 * to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}
static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if some mac addresses failed to be added/deleted, move them back
	 * to the mac_list and retry at the next sync.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When device is resetting, firmware is unable to handle
	 * mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing the hw vlan filter fails, record the vlan id,
	 * and try to remove it from hw later, to be consistent
	 * with the stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	/* disable vf queue before sending queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}
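/* Reset completion is polled from a status register: a VF function reset
 * is reported through HCLGEVF_VF_RST_ING, while the other reset types are
 * reported through HCLGEVF_RST_ING.
 */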
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by PF. Yes,
	 * this also means we might end up waiting a bit more even for VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	struct hclge_vf_to_pf_msg send_msg;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
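/* On failure the reset is re-queued as pending and retried; only after
 * HCLGEVF_RESET_MAX_FAIL_CNT consecutive failures is the VF marked
 * RST_FAIL and the reset state dumped.
 */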
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fail */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.rst_cnt++;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}
static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_RETRY_WAIT_MS	500
#define HCLGEVF_FLR_RETRY_CNT		5

	struct hclgevf_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

retry:
	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	hdev->reset_type = HNAE3_FLR_RESET;
	ret = hclgevf_reset_prepare(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
			ret);
		if (hdev->reset_pending ||
		    retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
			dev_err(&hdev->pdev->dev,
				"reset_pending:0x%lx, retry_cnt:%d\n",
				hdev->reset_pending, retry_cnt);
			clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
			up(&hdev->reset_sem);
			msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
			goto retry;
		}
	}

	/* disable misc vector before FLR done */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	hdev->rst_stats.flr_rst_cnt++;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	hclgevf_enable_vector(&hdev->misc_vector, true);

	ret = hclgevf_reset_rebuild(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
			 ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

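/* All of the schedule helpers above funnel into the same delayed work item
 * on hclgevf_wq; hclgevf_service_task() then demultiplexes the work based
 * on the RST/MBX service-scheduled state bits.
 */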
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}

static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of PF for this reset request.
		 * PF will ack cases 1b and 2, but we will not get any
		 * intimation about 1a from PF as cmdq would be in unreliable
		 * state i.e. mailbox communication between PF and VF would
		 * be broken.
		 *
		 * If we never get into the pending state it means either:
		 * 1. PF is not receiving our request which could be due to
		 *    IMP reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try
		 * resetting our PCIe + stack and see if it alleviates the
		 * problem.
		 */
		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		return;

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
		return;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

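/* Periodic work: self-throttled to roughly one pass per second via
 * last_serv_processed. Every HCLGEVF_KEEP_ALIVE_TASK_INTERVAL passes a
 * keep-alive is sent to the PF, and while the device is up the task also
 * refreshes stats, link state, VLAN/MAC tables and promisc mode.
 */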
static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);
	struct hnae3_handle *handle = &hdev->nic;

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
		hclgevf_keep_alive(hdev);

	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
		hclgevf_tqps_update_stats(handle);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in the future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hclgevf_sync_mac_table(hdev);

	hclgevf_sync_promisc_mode(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclgevf_task_schedule(hdev, delta);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
						service_task.work);

	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
	hclgevf_periodic_service_task(hdev);

	/* Handle reset and mbx again in case the periodic task delayed the
	 * handling by calling hclgevf_task_schedule() in
	 * hclgevf_periodic_service_task()
	 */
	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

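/* Decode the vector0 interrupt source. A pending hardware reset takes
 * priority over mailbox (CMDQ RX) events; anything else is reported as
 * an unknown source and left unhandled.
 */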
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 val, cmdq_stat_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
					 HCLGEVF_VECTOR0_CMDQ_STAT_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
		hdev->rst_stats.vf_rst_cnt++;
		/* set up VF hardware reset status; the PF will clear
		 * this status once its initialization is done.
		 */
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
				  val | HCLGEVF_VF_RST_ING_BIT);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		/* for revision 0x21, clearing interrupt is writing bit 0
		 * to the clear register, writing bit 1 means to keep the
		 * old value.
		 * for revision 0x20, the clear register is a read & write
		 * register, so we should just write 0 to the bit we are
		 * handling, and keep other bits as cmdq_stat_reg.
		 */
		if (hdev->pdev->revision >= 0x21)
			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		else
			*clearval = cmdq_stat_reg &
				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);

		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
		 cmdq_stat_reg);

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	ret = hclgevf_get_pf_media_type(hdev);
	if (ret)
		return ret;

	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

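/* Hardware GRO is toggled through a firmware command rather than in the
 * stack; devices that do not advertise GRO support skip the command and
 * report success.
 */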
static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = en ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_tuple_cfg *tuple_sets;
	u32 i;

	rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
	tuple_sets = &rss_cfg->rss_tuple_sets;
	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	}

	/* Initialize RSS indirect table */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;

	if (hdev->pdev->revision >= 0x21) {
		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
{
#define HCLGEVF_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		hclgevf_task_schedule(hdev, 0);
	} else {
		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclgevf_flush_link_update(hdev);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

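/* During a VF reset the per-queue reset below is skipped, presumably
 * because the command queue is disabled at that point (see
 * hclgevf_check_evt_cause()) and the hardware reset reinitializes the
 * queues anyway.
 */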
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->reset_type != HNAE3_VF_RESET)
		for (i = 0; i < handle->kinfo.num_tqps; i++)
			if (hclgevf_reset_tqp(handle, i))
				break;

	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
#define HCLGEVF_STATE_ALIVE	1
#define HCLGEVF_STATE_NOT_ALIVE	0

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
				   HCLGEVF_STATE_NOT_ALIVE;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);
	sema_init(&hdev->reset_sem, 1);

	spin_lock_init(&hdev->mac_table.mac_list_lock);
	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

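/* Vector allocation: with RoCE support the function must use MSI-X and
 * needs at least roce_base_msix_offset + 1 vectors so the RoCE block gets
 * its fixed-offset range; otherwise plain MSI is also acceptable.
 */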
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_dev_roce_supported(hdev))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
						hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGEVF_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %u\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}

static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					    struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.rst_cnt;
	int ret;

	ret = client->ops->init_instance(&hdev->nic);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.rst_cnt) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		return -EBUSY;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->nic))
		hclgevf_info_show(hdev);

	return 0;
}

static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = hclgevf_init_nic_client_instance(ae_dev, client);
		if (ret)
			goto clear_nic;

		ret = hclgevf_init_roce_client_instance(ae_dev,
							hdev->roce_client);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		ret = hclgevf_init_roce_client_instance(ae_dev, client);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
			hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
					HCLGEVF_MSIX_OFT_ROCEE_M,
					HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
			hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
					HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* the number of NIC MSI-X vectors always equals the RoCE's */
		hdev->num_nic_msix = hdev->num_roce_msix;

		/* VF should have NIC vectors and RoCE vectors, with the NIC
		 * vectors queued before the RoCE ones. The offset is fixed
		 * to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
			hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
					HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		hdev->num_nic_msix = hdev->num_msi;
	}

	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for vf(min:2).\n",
			hdev->num_nic_msix);
		return -EINVAL;
	}

	return 0;
}

static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
			       HCLGE_MBX_VPORT_LIST_CLEAR);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

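/* hclgevf_reset_hdev() above re-initializes only what a reset tears down;
 * hclgevf_init_hdev() below is the full probe-time bring-up covering PCI,
 * the command queue, vectors, TQPs and the service task.
 */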
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret)
		return ret;

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret)
		goto err_cmd_queue_init;

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* get VF resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;
	hdev->reset_type = HNAE3_NONE_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret)
		goto err_misc_irq_init;

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret)
		goto err_config;

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	hclgevf_rss_init_cfg(hdev);
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	/* ensure the VF table list is empty before init */
	ret = hclgevf_clear_vport_list(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to clear tbl list configuration, ret = %d.\n",
			ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
		 HCLGEVF_DRIVER_NAME);

	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_state_uninit(hdev);

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_mac_list(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

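/* Max combined channels a user may request: bounded both by the device's
 * RSS specification and by the TQPs available per TC.
 */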
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

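/* A req_rss_size of 0 means the user never asked for a specific size; in
 * that case the RSS size may freely grow up to max_rss_size. An explicit
 * request is honored only when it fits within the maximum.
 */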
static void hclgevf_update_rss_size(struct hnae3_handle *handle,
				    u32 new_tqps_num)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u16 max_rss_size;

	kinfo->req_rss_size = new_tqps_num;

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     hdev->num_tqps / kinfo->num_tc);

	/* Use the user's configuration when it is not larger than
	 * max_rss_size, otherwise, use the maximum specification value.
	 */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size)
		kinfo->rss_size = kinfo->req_rss_size;
	else if (kinfo->rss_size > max_rss_size ||
		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
		kinfo->rss_size = max_rss_size;

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
}

static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
				bool rxfh_configured)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	hclgevf_update_rss_size(handle, new_tqps_num);

	ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	hdev->rss_cfg.rss_size = kinfo->rss_size;

	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				   u8 *module_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_um, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetching per-VF registers values from VF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

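/* Handle a port based VLAN update pushed by the PF: the NIC client is
 * brought down around the change, the new configuration is echoed to the
 * PF over the mailbox, and the cached port VLAN state is refreshed before
 * the client comes back up.
 */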
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();

	/* send msg to PF and wait for it to update the port based VLAN info */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan_info, data_size);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.set_channels = hclgevf_set_channels,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
	if (!hclgevf_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
	destroy_workqueue(hclgevf_wq);
}

module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);