// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_VECTOR0_CMDQ_STATE_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}
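
/* Refresh the shadow per-TQP counters: one HCLGEVF_OPC_QUERY_RX_STATUS and
 * one HCLGEVF_OPC_QUERY_TX_STATUS command descriptor is issued per queue,
 * with the queue index (low 9 bits) in desc.data[0] and the packet count
 * returned in desc.data[1]. The counts are accumulated with '+=', i.e. the
 * hardware appears to report deltas rather than running totals.
 */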

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}
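
/* Most VF configuration below is carried over the VF-to-PF mailbox:
 * hclgevf_build_send_msg() zeroes the message and fills code/subcode, and
 * the third argument of hclgevf_send_mbx_msg() selects whether the call
 * just fires the request or also blocks for the PF's reply into the
 * supplied response buffer (as its uses throughout this file suggest).
 */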

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
				 i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after init the max rss_size and tqps, adjust the default tqp numbers
	 * and rss size with the actual vector numbers
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
				kinfo->rss_size);

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}
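
/* hclgevf_update_link_status() can be reached from more than one path;
 * the HCLGEVF_STATE_LINK_UPDATING bit serializes callers so only one of
 * them reports a change to the NIC (and, if registered, RoCE) clients.
 * A device marked HCLGEVF_STATE_DOWN is always reported as link down.
 */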

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
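
/* The RSS hash key (HCLGEVF_RSS_KEY_SIZE bytes) does not fit in a single
 * command descriptor, so hclgevf_set_rss_algo_key() writes it in chunks of
 * HCLGEVF_RSS_HASH_KEY_NUM bytes; hash_config carries the hash algorithm
 * in its low bits and the chunk index at HCLGEVF_RSS_HASH_KEY_OFFSET_B.
 */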

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclgevf_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	unsigned int i;
	int status;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}
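
/* On revision 0x20 hardware the VF shares the PF's RSS configuration (see
 * the comment below), so the hash key is fetched from the PF over the
 * mailbox in HCLGEVF_RSS_MBX_RESP_LEN-byte pieces: the piece index goes in
 * data[0] and each reply is copied into the shadow rss_hash_key, with the
 * final piece truncated to the remaining key length.
 */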

/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with the user specified
			 * key
			 */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}
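
/* Translate the RXH_* hash fields of an ethtool_rxnfc request into the
 * device's tuple-enable bits: source/destination L4 port, source/
 * destination IP, plus the verification tag for SCTP flows.
 */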

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
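
/* hclgevf_bind_ring_to_vector() walks the ring chain and packs up to
 * HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM (ring type, tqp index, GL index)
 * tuples per mailbox message, flushing whenever the message is full or
 * the chain ends, so an arbitrarily long chain is mapped with a minimal
 * number of VF-to-PF messages.
 */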

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
			     HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
			hnae3_get_field(node->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct pci_dev *pdev = hdev->pdev;
	bool en_bc_pmc;

	en_bc_pmc = pdev->revision != 0x20;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
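
/* When changing the unicast MAC, the VF sends both addresses to the PF:
 * the new one in data[0..5] and the old one in data[6..11], so the PF can
 * remove the old filter entry and install the new one. On the very first
 * set with no PF-assigned MAC, the "old" slot is zeroed instead,
 * presumably signalling the PF that there is no previous entry to remove.
 */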

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}

static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address was never added, it is unnecessary to delete */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %pM, state = %d, ret = %d\n",
				mac_node->mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}
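
/* The two helpers below merge the temporary add/del lists back into the
 * main MAC table after the mailbox requests above have been issued, so a
 * TO_ADD or TO_DEL that arrived from set_rx_mode while the lock was
 * dropped is not lost: states are reconciled via hclgevf_update_mac_node()
 * and failed entries stay queued for the next sync.
 */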

static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means we have received a TO_DEL request
		 * during the time window of sending the mac config request
		 * to the PF. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it will be removed next time; if it is TO_ADD,
		 * the TO_ADD request failed, so just remove the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr already exists in the mac list, it
			 * means a new TO_ADD request was received during the
			 * time window of sending the mac addr config request
			 * to the PF, so just change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}
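
/* hclgevf_sync_mac_list() snapshots pending TO_DEL/TO_ADD entries into
 * temporary lists under mac_list_lock, then talks to the PF with the lock
 * released (so the mailbox sends happen outside the spinlock, as the
 * comment below notes), deleting before adding so table space freed by
 * removals is available for the new entries.
 */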

static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if adding/deleting some mac addresses failed, move them back to
	 * the mac_list and retry next time.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting or reset failed, firmware is unable
	 * to handle mailbox. Just record the vlan id, and remove it after
	 * reset finishes.
	 */
	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing the hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to be consistent
	 * with the stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}
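
/* hclgevf_reset_wait() polls the relevant reset-status register
 * (HCLGEVF_VF_RST_ING for a VF reset, HCLGEVF_RST_ING otherwise) every
 * HCLGEVF_RESET_WAIT_US microseconds until the in-progress bits clear or
 * the combined timeout expires; the trailing msleep() then gives the rest
 * of the stack time to settle, as the comment in the function explains.
 */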

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by the PF; it
	 * also means we might end up waiting a bit longer even for a VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		struct hclge_vf_to_pf_msg send_msg;
		int ret;

		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to assert VF reset, ret = %d\n", ret);
			return ret;
		}
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
		 hdev->reset_type);

	return 0;
}

static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
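
/* On a failed reset, hclgevf_reset_err_handle() re-arms the IMP handshake
 * and, while fewer than HCLGEVF_RESET_MAX_FAIL_CNT attempts have failed,
 * re-queues the same reset type on reset_pending so the reset task
 * retries; beyond that it gives up, sets HCLGEVF_STATE_RST_FAIL and dumps
 * the reset statistics for debugging.
 */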

static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fails */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.rst_cnt++;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
(hdev->default_reset_request) 1965 hdev->reset_level = 1966 hclgevf_get_reset_level(hdev, 1967 &hdev->default_reset_request); 1968 else 1969 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1970 1971 /* reset of this VF requested */ 1972 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1973 hclgevf_reset_task_schedule(hdev); 1974 1975 hdev->last_reset_time = jiffies; 1976 } 1977 1978 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1979 enum hnae3_reset_type rst_type) 1980 { 1981 struct hclgevf_dev *hdev = ae_dev->priv; 1982 1983 set_bit(rst_type, &hdev->default_reset_request); 1984 } 1985 1986 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1987 { 1988 writel(en ? 1 : 0, vector->addr); 1989 } 1990 1991 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 1992 { 1993 #define HCLGEVF_FLR_RETRY_WAIT_MS 500 1994 #define HCLGEVF_FLR_RETRY_CNT 5 1995 1996 struct hclgevf_dev *hdev = ae_dev->priv; 1997 int retry_cnt = 0; 1998 int ret; 1999 2000 retry: 2001 down(&hdev->reset_sem); 2002 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2003 hdev->reset_type = HNAE3_FLR_RESET; 2004 ret = hclgevf_reset_prepare(hdev); 2005 if (ret) { 2006 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", 2007 ret); 2008 if (hdev->reset_pending || 2009 retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) { 2010 dev_err(&hdev->pdev->dev, 2011 "reset_pending:0x%lx, retry_cnt:%d\n", 2012 hdev->reset_pending, retry_cnt); 2013 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2014 up(&hdev->reset_sem); 2015 msleep(HCLGEVF_FLR_RETRY_WAIT_MS); 2016 goto retry; 2017 } 2018 } 2019 2020 /* disable misc vector before FLR done */ 2021 hclgevf_enable_vector(&hdev->misc_vector, false); 2022 hdev->rst_stats.flr_rst_cnt++; 2023 } 2024 2025 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 2026 { 2027 struct hclgevf_dev *hdev = ae_dev->priv; 2028 int ret; 2029 2030 hclgevf_enable_vector(&hdev->misc_vector, true); 2031 2032 ret = hclgevf_reset_rebuild(hdev); 2033 if (ret) 2034 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 2035 ret); 2036 2037 hdev->reset_type = HNAE3_NONE_RESET; 2038 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2039 up(&hdev->reset_sem); 2040 } 2041 2042 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 2043 { 2044 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2045 2046 return hdev->fw_version; 2047 } 2048 2049 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 2050 { 2051 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 2052 2053 vector->vector_irq = pci_irq_vector(hdev->pdev, 2054 HCLGEVF_MISC_VECTOR_NUM); 2055 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 2056 /* vector status always valid for Vector 0 */ 2057 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 2058 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 2059 2060 hdev->num_msi_left -= 1; 2061 hdev->num_msi_used += 1; 2062 } 2063 2064 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 2065 { 2066 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2067 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 2068 &hdev->state)) 2069 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2070 } 2071 2072 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 2073 { 2074 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2075 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 2076 &hdev->state)) 2077 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2078 } 2079 2080 static void 
hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}

static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of the below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked, which
		 *       resulted in the watchdog reacting and inducing a VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. a problem in TX due to some other lower layer (e.g.
		 *       the link layer not functioning properly).
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clearer way to detect the above cases than
		 * to react to the PF's response to this reset request. The PF
		 * will ack cases 1b and 2, but we will not get any intimation
		 * about 1a from the PF, as the cmdq would be in an unreliable
		 * state, i.e. mailbox communication between PF and VF would
		 * be broken.
		 *
		 * If we never get into the pending state it means either:
		 * 1. the PF is not receiving our request, which could be due
		 *    to an IMP reset
		 * 2. the PF is in a bad state
		 * We cannot do much for 2, but as a first check we can try
		 * resetting our PCIe + stack and see if it alleviates the
		 * problem.
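		 *
		 * Hence the retry logic below: re-request the current reset
		 * level up to HCLGEVF_MAX_RESET_ATTEMPTS_CNT times, then
		 * escalate to a full stack + PCIe reset
		 * (HNAE3_VF_FULL_RESET).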
2136 */ 2137 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 2138 /* prepare for full reset of stack + pcie interface */ 2139 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 2140 2141 /* "defer" schedule the reset task again */ 2142 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2143 } else { 2144 hdev->reset_attempts++; 2145 2146 set_bit(hdev->reset_level, &hdev->reset_pending); 2147 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2148 } 2149 hclgevf_reset_task_schedule(hdev); 2150 } 2151 2152 hdev->reset_type = HNAE3_NONE_RESET; 2153 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2154 up(&hdev->reset_sem); 2155 } 2156 2157 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 2158 { 2159 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2160 return; 2161 2162 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 2163 return; 2164 2165 hclgevf_mbx_async_handler(hdev); 2166 2167 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2168 } 2169 2170 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 2171 { 2172 struct hclge_vf_to_pf_msg send_msg; 2173 int ret; 2174 2175 if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) 2176 return; 2177 2178 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 2179 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2180 if (ret) 2181 dev_err(&hdev->pdev->dev, 2182 "VF sends keep alive cmd failed(=%d)\n", ret); 2183 } 2184 2185 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 2186 { 2187 unsigned long delta = round_jiffies_relative(HZ); 2188 struct hnae3_handle *handle = &hdev->nic; 2189 2190 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2191 return; 2192 2193 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 2194 delta = jiffies - hdev->last_serv_processed; 2195 2196 if (delta < round_jiffies_relative(HZ)) { 2197 delta = round_jiffies_relative(HZ) - delta; 2198 goto out; 2199 } 2200 } 2201 2202 hdev->serv_processed_cnt++; 2203 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 2204 hclgevf_keep_alive(hdev); 2205 2206 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 2207 hdev->last_serv_processed = jiffies; 2208 goto out; 2209 } 2210 2211 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 2212 hclgevf_tqps_update_stats(handle); 2213 2214 /* request the link status from the PF. 
	 * PF would be able to tell VF
	 * about such updates in the future, so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hclgevf_sync_mac_table(hdev);

	hclgevf_sync_promisc_mode(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclgevf_task_schedule(hdev, delta);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
						service_task.work);

	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
	hclgevf_periodic_service_task(hdev);

	/* Handle reset and mbx again in case periodical task delays the
	 * handling by calling hclgevf_task_schedule() in
	 * hclgevf_periodic_service_task()
	 */
	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 val, cmdq_stat_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
					 HCLGEVF_VECTOR0_CMDQ_STATE_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
		hdev->rst_stats.vf_rst_cnt++;
		/* set up VF hardware reset status; the PF will clear
		 * this status when it has finished initializing.
		 */
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
				  val | HCLGEVF_VF_RST_ING_BIT);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		/* for revision 0x21, clearing an interrupt is done by writing
		 * 0 to the corresponding bit of the clear register; writing 1
		 * keeps the old value.
		 * for revision 0x20, the clear register is a read & write
		 * register, so we should just write 0 to the bit we are
		 * handling, and keep the other bits as in cmdq_stat_reg.
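		 *
		 * E.g. to ack only the CMDQ RX event here:
		 *   rev >= 0x21: *clearval = ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B)
		 *   rev == 0x20: *clearval = cmdq_stat_reg &
		 *                ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B)
		 * which is exactly what the code below computes.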
2290 */ 2291 if (hdev->pdev->revision >= 0x21) 2292 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2293 else 2294 *clearval = cmdq_stat_reg & 2295 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2296 2297 return HCLGEVF_VECTOR0_EVENT_MBX; 2298 } 2299 2300 /* print other vector0 event source */ 2301 dev_info(&hdev->pdev->dev, 2302 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2303 cmdq_stat_reg); 2304 2305 return HCLGEVF_VECTOR0_EVENT_OTHER; 2306 } 2307 2308 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2309 { 2310 enum hclgevf_evt_cause event_cause; 2311 struct hclgevf_dev *hdev = data; 2312 u32 clearval; 2313 2314 hclgevf_enable_vector(&hdev->misc_vector, false); 2315 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2316 2317 switch (event_cause) { 2318 case HCLGEVF_VECTOR0_EVENT_RST: 2319 hclgevf_reset_task_schedule(hdev); 2320 break; 2321 case HCLGEVF_VECTOR0_EVENT_MBX: 2322 hclgevf_mbx_handler(hdev); 2323 break; 2324 default: 2325 break; 2326 } 2327 2328 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 2329 hclgevf_clear_event_cause(hdev, clearval); 2330 hclgevf_enable_vector(&hdev->misc_vector, true); 2331 } 2332 2333 return IRQ_HANDLED; 2334 } 2335 2336 static int hclgevf_configure(struct hclgevf_dev *hdev) 2337 { 2338 int ret; 2339 2340 /* get current port based vlan state from PF */ 2341 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2342 if (ret) 2343 return ret; 2344 2345 /* get queue configuration from PF */ 2346 ret = hclgevf_get_queue_info(hdev); 2347 if (ret) 2348 return ret; 2349 2350 /* get queue depth info from PF */ 2351 ret = hclgevf_get_queue_depth(hdev); 2352 if (ret) 2353 return ret; 2354 2355 ret = hclgevf_get_pf_media_type(hdev); 2356 if (ret) 2357 return ret; 2358 2359 /* get tc configuration from PF */ 2360 return hclgevf_get_tc_info(hdev); 2361 } 2362 2363 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2364 { 2365 struct pci_dev *pdev = ae_dev->pdev; 2366 struct hclgevf_dev *hdev; 2367 2368 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2369 if (!hdev) 2370 return -ENOMEM; 2371 2372 hdev->pdev = pdev; 2373 hdev->ae_dev = ae_dev; 2374 ae_dev->priv = hdev; 2375 2376 return 0; 2377 } 2378 2379 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2380 { 2381 struct hnae3_handle *roce = &hdev->roce; 2382 struct hnae3_handle *nic = &hdev->nic; 2383 2384 roce->rinfo.num_vectors = hdev->num_roce_msix; 2385 2386 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2387 hdev->num_msi_left == 0) 2388 return -EINVAL; 2389 2390 roce->rinfo.base_vector = hdev->roce_base_vector; 2391 2392 roce->rinfo.netdev = nic->kinfo.netdev; 2393 roce->rinfo.roce_io_base = hdev->hw.io_base; 2394 2395 roce->pdev = nic->pdev; 2396 roce->ae_algo = nic->ae_algo; 2397 roce->numa_node_mask = nic->numa_node_mask; 2398 2399 return 0; 2400 } 2401 2402 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 2403 { 2404 struct hclgevf_cfg_gro_status_cmd *req; 2405 struct hclgevf_desc desc; 2406 int ret; 2407 2408 if (!hnae3_dev_gro_supported(hdev)) 2409 return 0; 2410 2411 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2412 false); 2413 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2414 2415 req->gro_en = en ? 
1 : 0; 2416 2417 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2418 if (ret) 2419 dev_err(&hdev->pdev->dev, 2420 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2421 2422 return ret; 2423 } 2424 2425 static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) 2426 { 2427 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2428 struct hclgevf_rss_tuple_cfg *tuple_sets; 2429 u32 i; 2430 2431 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 2432 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2433 tuple_sets = &rss_cfg->rss_tuple_sets; 2434 if (hdev->pdev->revision >= 0x21) { 2435 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2436 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2437 HCLGEVF_RSS_KEY_SIZE); 2438 2439 tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2440 tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2441 tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2442 tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2443 tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2444 tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2445 tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2446 tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2447 } 2448 2449 /* Initialize RSS indirect table */ 2450 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 2451 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2452 } 2453 2454 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2455 { 2456 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2457 int ret; 2458 2459 if (hdev->pdev->revision >= 0x21) { 2460 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 2461 rss_cfg->rss_hash_key); 2462 if (ret) 2463 return ret; 2464 2465 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2466 if (ret) 2467 return ret; 2468 } 2469 2470 ret = hclgevf_set_rss_indir_table(hdev); 2471 if (ret) 2472 return ret; 2473 2474 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2475 } 2476 2477 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2478 { 2479 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2480 false); 2481 } 2482 2483 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2484 { 2485 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2486 2487 unsigned long last = hdev->serv_processed_cnt; 2488 int i = 0; 2489 2490 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2491 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2492 last == hdev->serv_processed_cnt) 2493 usleep_range(1, 1); 2494 } 2495 2496 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2497 { 2498 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2499 2500 if (enable) { 2501 hclgevf_task_schedule(hdev, 0); 2502 } else { 2503 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2504 2505 /* flush memory to make sure DOWN is seen by service task */ 2506 smp_mb__before_atomic(); 2507 hclgevf_flush_link_update(hdev); 2508 } 2509 } 2510 2511 static int hclgevf_ae_start(struct hnae3_handle *handle) 2512 { 2513 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2514 2515 hclgevf_reset_tqp_stats(handle); 2516 2517 hclgevf_request_link_info(hdev); 2518 2519 hclgevf_update_link_mode(hdev); 2520 2521 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2522 2523 return 0; 2524 } 2525 2526 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2527 { 2528 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2529 int i; 2530 2531 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2532 2533 if (hdev->reset_type != HNAE3_VF_RESET) 
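		/* a full VF reset presumably re-initializes the queues in
		 * hardware anyway, so the per-queue reset below is only
		 * attempted for the other reset types
		 */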
2534 for (i = 0; i < handle->kinfo.num_tqps; i++) 2535 if (hclgevf_reset_tqp(handle, i)) 2536 break; 2537 2538 hclgevf_reset_tqp_stats(handle); 2539 hclgevf_update_link_status(hdev, 0); 2540 } 2541 2542 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2543 { 2544 #define HCLGEVF_STATE_ALIVE 1 2545 #define HCLGEVF_STATE_NOT_ALIVE 0 2546 2547 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2548 struct hclge_vf_to_pf_msg send_msg; 2549 2550 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2551 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2552 HCLGEVF_STATE_NOT_ALIVE; 2553 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2554 } 2555 2556 static int hclgevf_client_start(struct hnae3_handle *handle) 2557 { 2558 return hclgevf_set_alive(handle, true); 2559 } 2560 2561 static void hclgevf_client_stop(struct hnae3_handle *handle) 2562 { 2563 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2564 int ret; 2565 2566 ret = hclgevf_set_alive(handle, false); 2567 if (ret) 2568 dev_warn(&hdev->pdev->dev, 2569 "%s failed %d\n", __func__, ret); 2570 } 2571 2572 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2573 { 2574 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2575 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2576 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2577 2578 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2579 2580 mutex_init(&hdev->mbx_resp.mbx_mutex); 2581 sema_init(&hdev->reset_sem, 1); 2582 2583 spin_lock_init(&hdev->mac_table.mac_list_lock); 2584 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2585 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2586 2587 /* bring the device down */ 2588 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2589 } 2590 2591 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2592 { 2593 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2594 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2595 2596 if (hdev->service_task.work.func) 2597 cancel_delayed_work_sync(&hdev->service_task); 2598 2599 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2600 } 2601 2602 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2603 { 2604 struct pci_dev *pdev = hdev->pdev; 2605 int vectors; 2606 int i; 2607 2608 if (hnae3_dev_roce_supported(hdev)) 2609 vectors = pci_alloc_irq_vectors(pdev, 2610 hdev->roce_base_msix_offset + 1, 2611 hdev->num_msi, 2612 PCI_IRQ_MSIX); 2613 else 2614 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2615 hdev->num_msi, 2616 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2617 2618 if (vectors < 0) { 2619 dev_err(&pdev->dev, 2620 "failed(%d) to allocate MSI/MSI-X vectors\n", 2621 vectors); 2622 return vectors; 2623 } 2624 if (vectors < hdev->num_msi) 2625 dev_warn(&hdev->pdev->dev, 2626 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2627 hdev->num_msi, vectors); 2628 2629 hdev->num_msi = vectors; 2630 hdev->num_msi_left = vectors; 2631 2632 hdev->base_msi_vector = pdev->irq; 2633 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2634 2635 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2636 sizeof(u16), GFP_KERNEL); 2637 if (!hdev->vector_status) { 2638 pci_free_irq_vectors(pdev); 2639 return -ENOMEM; 2640 } 2641 2642 for (i = 0; i < hdev->num_msi; i++) 2643 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2644 2645 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2646 sizeof(int), GFP_KERNEL); 2647 if (!hdev->vector_irq) { 2648 devm_kfree(&pdev->dev, hdev->vector_status); 2649 pci_free_irq_vectors(pdev); 2650 return 
-ENOMEM; 2651 } 2652 2653 return 0; 2654 } 2655 2656 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2657 { 2658 struct pci_dev *pdev = hdev->pdev; 2659 2660 devm_kfree(&pdev->dev, hdev->vector_status); 2661 devm_kfree(&pdev->dev, hdev->vector_irq); 2662 pci_free_irq_vectors(pdev); 2663 } 2664 2665 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2666 { 2667 int ret; 2668 2669 hclgevf_get_misc_vector(hdev); 2670 2671 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2672 HCLGEVF_NAME, pci_name(hdev->pdev)); 2673 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2674 0, hdev->misc_vector.name, hdev); 2675 if (ret) { 2676 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2677 hdev->misc_vector.vector_irq); 2678 return ret; 2679 } 2680 2681 hclgevf_clear_event_cause(hdev, 0); 2682 2683 /* enable misc. vector(vector 0) */ 2684 hclgevf_enable_vector(&hdev->misc_vector, true); 2685 2686 return ret; 2687 } 2688 2689 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2690 { 2691 /* disable misc vector(vector 0) */ 2692 hclgevf_enable_vector(&hdev->misc_vector, false); 2693 synchronize_irq(hdev->misc_vector.vector_irq); 2694 free_irq(hdev->misc_vector.vector_irq, hdev); 2695 hclgevf_free_vector(hdev, 0); 2696 } 2697 2698 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2699 { 2700 struct device *dev = &hdev->pdev->dev; 2701 2702 dev_info(dev, "VF info begin:\n"); 2703 2704 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2705 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2706 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2707 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2708 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2709 dev_info(dev, "PF media type of this VF: %u\n", 2710 hdev->hw.mac.media_type); 2711 2712 dev_info(dev, "VF info end.\n"); 2713 } 2714 2715 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2716 struct hnae3_client *client) 2717 { 2718 struct hclgevf_dev *hdev = ae_dev->priv; 2719 int rst_cnt = hdev->rst_stats.rst_cnt; 2720 int ret; 2721 2722 ret = client->ops->init_instance(&hdev->nic); 2723 if (ret) 2724 return ret; 2725 2726 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2727 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2728 rst_cnt != hdev->rst_stats.rst_cnt) { 2729 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2730 2731 client->ops->uninit_instance(&hdev->nic, 0); 2732 return -EBUSY; 2733 } 2734 2735 hnae3_set_client_init_flag(client, ae_dev, 1); 2736 2737 if (netif_msg_drv(&hdev->nic)) 2738 hclgevf_info_show(hdev); 2739 2740 return 0; 2741 } 2742 2743 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2744 struct hnae3_client *client) 2745 { 2746 struct hclgevf_dev *hdev = ae_dev->priv; 2747 int ret; 2748 2749 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2750 !hdev->nic_client) 2751 return 0; 2752 2753 ret = hclgevf_init_roce_base_info(hdev); 2754 if (ret) 2755 return ret; 2756 2757 ret = client->ops->init_instance(&hdev->roce); 2758 if (ret) 2759 return ret; 2760 2761 hnae3_set_client_init_flag(client, ae_dev, 1); 2762 2763 return 0; 2764 } 2765 2766 static int hclgevf_init_client_instance(struct hnae3_client *client, 2767 struct hnae3_ae_dev *ae_dev) 2768 { 2769 struct hclgevf_dev *hdev = ae_dev->priv; 2770 int ret; 2771 2772 switch (client->type) { 2773 case HNAE3_CLIENT_KNIC: 2774 hdev->nic_client = client; 
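		/* mirror the client pointer on the nic handle handed to it */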
2775 hdev->nic.client = client; 2776 2777 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2778 if (ret) 2779 goto clear_nic; 2780 2781 ret = hclgevf_init_roce_client_instance(ae_dev, 2782 hdev->roce_client); 2783 if (ret) 2784 goto clear_roce; 2785 2786 break; 2787 case HNAE3_CLIENT_ROCE: 2788 if (hnae3_dev_roce_supported(hdev)) { 2789 hdev->roce_client = client; 2790 hdev->roce.client = client; 2791 } 2792 2793 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2794 if (ret) 2795 goto clear_roce; 2796 2797 break; 2798 default: 2799 return -EINVAL; 2800 } 2801 2802 return 0; 2803 2804 clear_nic: 2805 hdev->nic_client = NULL; 2806 hdev->nic.client = NULL; 2807 return ret; 2808 clear_roce: 2809 hdev->roce_client = NULL; 2810 hdev->roce.client = NULL; 2811 return ret; 2812 } 2813 2814 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2815 struct hnae3_ae_dev *ae_dev) 2816 { 2817 struct hclgevf_dev *hdev = ae_dev->priv; 2818 2819 /* un-init roce, if it exists */ 2820 if (hdev->roce_client) { 2821 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2822 hdev->roce_client = NULL; 2823 hdev->roce.client = NULL; 2824 } 2825 2826 /* un-init nic/unic, if this was not called by roce client */ 2827 if (client->ops->uninit_instance && hdev->nic_client && 2828 client->type != HNAE3_CLIENT_ROCE) { 2829 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2830 2831 client->ops->uninit_instance(&hdev->nic, 0); 2832 hdev->nic_client = NULL; 2833 hdev->nic.client = NULL; 2834 } 2835 } 2836 2837 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2838 { 2839 struct pci_dev *pdev = hdev->pdev; 2840 struct hclgevf_hw *hw; 2841 int ret; 2842 2843 ret = pci_enable_device(pdev); 2844 if (ret) { 2845 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2846 return ret; 2847 } 2848 2849 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2850 if (ret) { 2851 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2852 goto err_disable_device; 2853 } 2854 2855 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2856 if (ret) { 2857 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2858 goto err_disable_device; 2859 } 2860 2861 pci_set_master(pdev); 2862 hw = &hdev->hw; 2863 hw->hdev = hdev; 2864 hw->io_base = pci_iomap(pdev, 2, 0); 2865 if (!hw->io_base) { 2866 dev_err(&pdev->dev, "can't map configuration register space\n"); 2867 ret = -ENOMEM; 2868 goto err_clr_master; 2869 } 2870 2871 return 0; 2872 2873 err_clr_master: 2874 pci_clear_master(pdev); 2875 pci_release_regions(pdev); 2876 err_disable_device: 2877 pci_disable_device(pdev); 2878 2879 return ret; 2880 } 2881 2882 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2883 { 2884 struct pci_dev *pdev = hdev->pdev; 2885 2886 pci_iounmap(pdev, hdev->hw.io_base); 2887 pci_clear_master(pdev); 2888 pci_release_regions(pdev); 2889 pci_disable_device(pdev); 2890 } 2891 2892 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2893 { 2894 struct hclgevf_query_res_cmd *req; 2895 struct hclgevf_desc desc; 2896 int ret; 2897 2898 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 2899 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2900 if (ret) { 2901 dev_err(&hdev->pdev->dev, 2902 "query vf resource failed, ret = %d.\n", ret); 2903 return ret; 2904 } 2905 2906 req = (struct hclgevf_query_res_cmd *)desc.data; 2907 2908 if (hnae3_dev_roce_supported(hdev)) { 2909 hdev->roce_base_msix_offset = 2910 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 2911 
					HCLGEVF_MSIX_OFT_ROCEE_M,
					HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
			hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
					HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* the number of NIC MSI-X vectors always equals the RoCE's */
		hdev->num_nic_msix = hdev->num_roce_msix;

		/* A VF has both NIC vectors and RoCE vectors; the NIC vectors
		 * are queued before the RoCE vectors. The offset is fixed
		 * to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
			hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
					HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		hdev->num_nic_msix = hdev->num_msi;
	}

	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for vf(min:2).\n",
			hdev->num_nic_msix);
		return -EINVAL;
	}

	return 0;
}

static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
			       HCLGE_MBX_VPORT_LIST_CLEAR);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret)
		return ret;

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret)
		goto err_cmd_queue_init;

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;
	hdev->reset_type = HNAE3_NONE_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret)
		goto err_misc_irq_init;

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret)
		goto err_config;

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	hclgevf_rss_init_cfg(hdev);
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	/* ensure the vf tbl list is empty before init */
	ret = hclgevf_clear_vport_list(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to clear tbl list configuration, ret = %d.\n",
			ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
		 HCLGEVF_DRIVER_NAME);

	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_state_uninit(hdev);

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_mac_list(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static void hclgevf_update_rss_size(struct hnae3_handle *handle,
				    u32 new_tqps_num)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u16 max_rss_size;

	kinfo->req_rss_size = new_tqps_num;

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     hdev->num_tqps / kinfo->num_tc);

	/* Use the user's configuration when it is not larger than
	 * max_rss_size, otherwise, use the maximum specification value.
	 */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size)
		kinfo->rss_size = kinfo->req_rss_size;
	else if (kinfo->rss_size > max_rss_size ||
		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
		kinfo->rss_size = max_rss_size;

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
}

static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
				bool rxfh_configured)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	hclgevf_update_rss_size(handle, new_tqps_num);

	ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by the user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the RSS indirection table for the new RSS size */
	rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	hdev->rss_cfg.rss_size = kinfo->rss_size;

	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int
hclgevf_get_status(struct hnae3_handle *handle) 3293 { 3294 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3295 3296 return hdev->hw.mac.link; 3297 } 3298 3299 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3300 u8 *auto_neg, u32 *speed, 3301 u8 *duplex) 3302 { 3303 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3304 3305 if (speed) 3306 *speed = hdev->hw.mac.speed; 3307 if (duplex) 3308 *duplex = hdev->hw.mac.duplex; 3309 if (auto_neg) 3310 *auto_neg = AUTONEG_DISABLE; 3311 } 3312 3313 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3314 u8 duplex) 3315 { 3316 hdev->hw.mac.speed = speed; 3317 hdev->hw.mac.duplex = duplex; 3318 } 3319 3320 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3321 { 3322 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3323 3324 return hclgevf_config_gro(hdev, enable); 3325 } 3326 3327 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3328 u8 *module_type) 3329 { 3330 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3331 3332 if (media_type) 3333 *media_type = hdev->hw.mac.media_type; 3334 3335 if (module_type) 3336 *module_type = hdev->hw.mac.module_type; 3337 } 3338 3339 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3340 { 3341 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3342 3343 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3344 } 3345 3346 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3347 { 3348 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3349 3350 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3351 } 3352 3353 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3354 { 3355 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3356 3357 return hdev->rst_stats.hw_rst_done_cnt; 3358 } 3359 3360 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3361 unsigned long *supported, 3362 unsigned long *advertising) 3363 { 3364 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3365 3366 *supported = hdev->hw.mac.supported; 3367 *advertising = hdev->hw.mac.advertising; 3368 } 3369 3370 #define MAX_SEPARATE_NUM 4 3371 #define SEPARATOR_VALUE 0xFFFFFFFF 3372 #define REG_NUM_PER_LINE 4 3373 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 3374 3375 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 3376 { 3377 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 3378 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3379 3380 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 3381 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 3382 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 3383 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 3384 3385 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 3386 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 3387 } 3388 3389 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 3390 void *data) 3391 { 3392 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3393 int i, j, reg_um, separator_num; 3394 u32 *reg = data; 3395 3396 *version = hdev->fw_version; 3397 3398 /* fetching per-VF registers values from VF PCIe register space */ 3399 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 3400 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3401 for (i = 0; i < reg_um; i++) 3402 *reg++ = hclgevf_read_dev(&hdev->hw, 
					  cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	rtnl_lock();

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
		dev_warn(&hdev->pdev->dev,
			 "is resetting when updating port based vlan info\n");
		rtnl_unlock();
		return;
	}

	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret) {
		rtnl_unlock();
		return;
	}

	/* send a msg to the PF and wait for it to update the port based
	 * vlan info
	 */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan_info, data_size);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (!ret) {
		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
			nic->port_base_vlan_state = state;
		else
			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
.get_rss_indir_size = hclgevf_get_rss_indir_size, 3503 .get_rss = hclgevf_get_rss, 3504 .set_rss = hclgevf_set_rss, 3505 .get_rss_tuple = hclgevf_get_rss_tuple, 3506 .set_rss_tuple = hclgevf_set_rss_tuple, 3507 .get_tc_size = hclgevf_get_tc_size, 3508 .get_fw_version = hclgevf_get_fw_version, 3509 .set_vlan_filter = hclgevf_set_vlan_filter, 3510 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3511 .reset_event = hclgevf_reset_event, 3512 .set_default_reset_request = hclgevf_set_def_reset_request, 3513 .set_channels = hclgevf_set_channels, 3514 .get_channels = hclgevf_get_channels, 3515 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3516 .get_regs_len = hclgevf_get_regs_len, 3517 .get_regs = hclgevf_get_regs, 3518 .get_status = hclgevf_get_status, 3519 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3520 .get_media_type = hclgevf_get_media_type, 3521 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 3522 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3523 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 3524 .set_gro_en = hclgevf_gro_en, 3525 .set_mtu = hclgevf_set_mtu, 3526 .get_global_queue_id = hclgevf_get_qid_global, 3527 .set_timer_task = hclgevf_set_timer_task, 3528 .get_link_mode = hclgevf_get_link_mode, 3529 .set_promisc_mode = hclgevf_set_promisc_mode, 3530 .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3531 }; 3532 3533 static struct hnae3_ae_algo ae_algovf = { 3534 .ops = &hclgevf_ops, 3535 .pdev_id_table = ae_algovf_pci_tbl, 3536 }; 3537 3538 static int hclgevf_init(void) 3539 { 3540 pr_info("%s is initializing\n", HCLGEVF_NAME); 3541 3542 hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); 3543 if (!hclgevf_wq) { 3544 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3545 return -ENOMEM; 3546 } 3547 3548 hnae3_register_ae_algo(&ae_algovf); 3549 3550 return 0; 3551 } 3552 3553 static void hclgevf_exit(void) 3554 { 3555 hnae3_unregister_ae_algo(&ae_algovf); 3556 destroy_workqueue(hclgevf_wq); 3557 } 3558 module_init(hclgevf_init); 3559 module_exit(hclgevf_exit); 3560 3561 MODULE_LICENSE("GPL"); 3562 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3563 MODULE_DESCRIPTION("HCLGEVF Driver"); 3564 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3565