1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 #include "hclgevf_devlink.h" 12 #include "hclge_comm_rss.h" 13 14 #define HCLGEVF_NAME "hclgevf" 15 16 #define HCLGEVF_RESET_MAX_FAIL_CNT 5 17 18 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 19 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 20 unsigned long delay); 21 22 static struct hnae3_ae_algo ae_algovf; 23 24 static struct workqueue_struct *hclgevf_wq; 25 26 static const struct pci_device_id ae_algovf_pci_tbl[] = { 27 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 28 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 29 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 30 /* required last entry */ 31 {0, } 32 }; 33 34 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 35 36 static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 37 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 38 HCLGE_COMM_NIC_CSQ_DEPTH_REG, 39 HCLGE_COMM_NIC_CSQ_TAIL_REG, 40 HCLGE_COMM_NIC_CSQ_HEAD_REG, 41 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 42 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 43 HCLGE_COMM_NIC_CRQ_DEPTH_REG, 44 HCLGE_COMM_NIC_CRQ_TAIL_REG, 45 HCLGE_COMM_NIC_CRQ_HEAD_REG, 46 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, 47 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG, 48 HCLGE_COMM_CMDQ_INTR_EN_REG, 49 HCLGE_COMM_CMDQ_INTR_GEN_REG}; 50 51 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 52 HCLGEVF_RST_ING, 53 HCLGEVF_GRO_EN_REG}; 54 55 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 56 HCLGEVF_RING_RX_ADDR_H_REG, 57 HCLGEVF_RING_RX_BD_NUM_REG, 58 HCLGEVF_RING_RX_BD_LENGTH_REG, 59 HCLGEVF_RING_RX_MERGE_EN_REG, 60 HCLGEVF_RING_RX_TAIL_REG, 61 HCLGEVF_RING_RX_HEAD_REG, 62 HCLGEVF_RING_RX_FBD_NUM_REG, 63 HCLGEVF_RING_RX_OFFSET_REG, 64 HCLGEVF_RING_RX_FBD_OFFSET_REG, 65 HCLGEVF_RING_RX_STASH_REG, 66 HCLGEVF_RING_RX_BD_ERR_REG, 67 HCLGEVF_RING_TX_ADDR_L_REG, 68 HCLGEVF_RING_TX_ADDR_H_REG, 69 HCLGEVF_RING_TX_BD_NUM_REG, 70 HCLGEVF_RING_TX_PRIORITY_REG, 71 HCLGEVF_RING_TX_TC_REG, 72 HCLGEVF_RING_TX_MERGE_EN_REG, 73 HCLGEVF_RING_TX_TAIL_REG, 74 HCLGEVF_RING_TX_HEAD_REG, 75 HCLGEVF_RING_TX_FBD_NUM_REG, 76 HCLGEVF_RING_TX_OFFSET_REG, 77 HCLGEVF_RING_TX_EBD_NUM_REG, 78 HCLGEVF_RING_TX_EBD_OFFSET_REG, 79 HCLGEVF_RING_TX_BD_ERR_REG, 80 HCLGEVF_RING_EN_REG}; 81 82 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 83 HCLGEVF_TQP_INTR_GL0_REG, 84 HCLGEVF_TQP_INTR_GL1_REG, 85 HCLGEVF_TQP_INTR_GL2_REG, 86 HCLGEVF_TQP_INTR_RL_REG}; 87 88 /* hclgevf_cmd_send - send command to command queue 89 * @hw: pointer to the hw struct 90 * @desc: prefilled descriptor for describing the command 91 * @num : the number of descriptors to be sent 92 * 93 * This is the main send command for command queue, it 94 * sends the queue, cleans the queue, etc 95 */ 96 int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num) 97 { 98 return hclge_comm_cmd_send(&hw->hw, desc, num); 99 } 100 101 void hclgevf_arq_init(struct hclgevf_dev *hdev) 102 { 103 struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq; 104 105 spin_lock(&cmdq->crq.lock); 106 /* initialize the pointers of async rx queue of mailbox */ 107 hdev->arq.hdev = hdev; 108 hdev->arq.head = 0; 109 hdev->arq.tail = 0; 110 atomic_set(&hdev->arq.count, 0); 111 spin_unlock(&cmdq->crq.lock); 112 } 113 114 static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 115 { 116 if (!handle->client) 117 return container_of(handle, struct hclgevf_dev, nic); 118 else if (handle->client->type == HNAE3_CLIENT_ROCE) 119 return container_of(handle, struct hclgevf_dev, roce); 120 else 121 return container_of(handle, struct hclgevf_dev, nic); 122 } 123 124 static void hclgevf_update_stats(struct hnae3_handle *handle, 125 struct net_device_stats *net_stats) 126 { 127 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 128 int status; 129 130 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 131 if (status) 132 dev_err(&hdev->pdev->dev, 133 "VF update of TQPS stats fail, status = %d.\n", 134 status); 135 } 136 137 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 138 { 139 if (strset == ETH_SS_TEST) 140 return -EOPNOTSUPP; 141 else if (strset == ETH_SS_STATS) 142 return hclge_comm_tqps_get_sset_count(handle); 143 144 return 0; 145 } 146 147 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 148 u8 *data) 149 { 150 u8 *p = (char *)data; 151 152 if (strset == ETH_SS_STATS) 153 p = hclge_comm_tqps_get_strings(handle, p); 154 } 155 156 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 157 { 158 hclge_comm_tqps_get_stats(handle, data); 159 } 160 161 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, 162 u8 subcode) 163 { 164 if (msg) { 165 memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); 166 msg->code = code; 167 msg->subcode = subcode; 168 } 169 } 170 171 static int hclgevf_get_basic_info(struct hclgevf_dev *hdev) 172 { 173 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 174 u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE]; 175 struct hclge_basic_info *basic_info; 176 struct hclge_vf_to_pf_msg send_msg; 177 unsigned long caps; 178 int status; 179 180 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0); 181 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 182 sizeof(resp_msg)); 183 if (status) { 184 dev_err(&hdev->pdev->dev, 185 "failed to get basic info from pf, ret = %d", status); 186 return status; 187 } 188 189 basic_info = (struct hclge_basic_info *)resp_msg; 190 191 hdev->hw_tc_map = basic_info->hw_tc_map; 192 hdev->mbx_api_version = basic_info->mbx_api_version; 193 caps = basic_info->pf_caps; 194 if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps)) 195 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); 196 197 return 0; 198 } 199 200 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 201 { 202 struct hnae3_handle *nic = &hdev->nic; 203 struct hclge_vf_to_pf_msg send_msg; 204 u8 resp_msg; 205 int ret; 206 207 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 208 HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); 209 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 210 sizeof(u8)); 211 if (ret) { 212 dev_err(&hdev->pdev->dev, 213 "VF request to get port based vlan state failed %d", 214 ret); 215 return ret; 216 } 217 218 nic->port_base_vlan_state = resp_msg; 219 220 return 0; 221 } 222 223 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 224 { 225 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 226 #define HCLGEVF_TQPS_ALLOC_OFFSET 0 227 #define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 228 #define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 229 230 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 231 struct hclge_vf_to_pf_msg send_msg; 232 int status; 233 234 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); 235 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 236 HCLGEVF_TQPS_RSS_INFO_LEN); 237 if (status) { 238 dev_err(&hdev->pdev->dev, 239 "VF request to get tqp info from PF failed %d", 240 status); 241 return status; 242 } 243 244 memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], 245 sizeof(u16)); 246 memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], 247 sizeof(u16)); 248 memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], 249 sizeof(u16)); 250 251 return 0; 252 } 253 254 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 255 { 256 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 257 #define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 258 #define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 259 260 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 261 struct hclge_vf_to_pf_msg send_msg; 262 int ret; 263 264 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); 265 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 266 HCLGEVF_TQPS_DEPTH_INFO_LEN); 267 if (ret) { 268 dev_err(&hdev->pdev->dev, 269 "VF request to get tqp depth info from PF failed %d", 270 ret); 271 return ret; 272 } 273 274 memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], 275 sizeof(u16)); 276 memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], 277 sizeof(u16)); 278 279 return 0; 280 } 281 282 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 283 { 284 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 285 struct hclge_vf_to_pf_msg send_msg; 286 u16 qid_in_pf = 0; 287 u8 resp_data[2]; 288 int ret; 289 290 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); 291 memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 292 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, 293 sizeof(resp_data)); 294 if (!ret) 295 qid_in_pf = *(u16 *)resp_data; 296 297 return qid_in_pf; 298 } 299 300 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 301 { 302 struct hclge_vf_to_pf_msg send_msg; 303 u8 resp_msg[2]; 304 int ret; 305 306 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); 307 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 308 sizeof(resp_msg)); 309 if (ret) { 310 dev_err(&hdev->pdev->dev, 311 "VF request to get the pf port media type failed %d", 312 ret); 313 return ret; 314 } 315 316 hdev->hw.mac.media_type = resp_msg[0]; 317 hdev->hw.mac.module_type = resp_msg[1]; 318 319 return 0; 320 } 321 322 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 323 { 324 struct hclge_comm_tqp *tqp; 325 int i; 326 327 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 328 sizeof(struct hclge_comm_tqp), GFP_KERNEL); 329 if (!hdev->htqp) 330 return -ENOMEM; 331 332 tqp = hdev->htqp; 333 334 for (i = 0; i < hdev->num_tqps; i++) { 335 tqp->dev = &hdev->pdev->dev; 336 tqp->index = i; 337 338 tqp->q.ae_algo = &ae_algovf; 339 tqp->q.buf_size = hdev->rx_buf_len; 340 tqp->q.tx_desc_num = hdev->num_tx_desc; 341 tqp->q.rx_desc_num = hdev->num_rx_desc; 342 343 /* need an extended offset to configure queues >= 344 * HCLGEVF_TQP_MAX_SIZE_DEV_V2. 345 */ 346 if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) 347 tqp->q.io_base = hdev->hw.hw.io_base + 348 HCLGEVF_TQP_REG_OFFSET + 349 i * HCLGEVF_TQP_REG_SIZE; 350 else 351 tqp->q.io_base = hdev->hw.hw.io_base + 352 HCLGEVF_TQP_REG_OFFSET + 353 HCLGEVF_TQP_EXT_REG_OFFSET + 354 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * 355 HCLGEVF_TQP_REG_SIZE; 356 357 tqp++; 358 } 359 360 return 0; 361 } 362 363 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 364 { 365 struct hnae3_handle *nic = &hdev->nic; 366 struct hnae3_knic_private_info *kinfo; 367 u16 new_tqps = hdev->num_tqps; 368 unsigned int i; 369 u8 num_tc = 0; 370 371 kinfo = &nic->kinfo; 372 kinfo->num_tx_desc = hdev->num_tx_desc; 373 kinfo->num_rx_desc = hdev->num_rx_desc; 374 kinfo->rx_buf_len = hdev->rx_buf_len; 375 for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) 376 if (hdev->hw_tc_map & BIT(i)) 377 num_tc++; 378 379 num_tc = num_tc ? num_tc : 1; 380 kinfo->tc_info.num_tc = num_tc; 381 kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); 382 new_tqps = kinfo->rss_size * num_tc; 383 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 384 385 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 386 sizeof(struct hnae3_queue *), GFP_KERNEL); 387 if (!kinfo->tqp) 388 return -ENOMEM; 389 390 for (i = 0; i < kinfo->num_tqps; i++) { 391 hdev->htqp[i].q.handle = &hdev->nic; 392 hdev->htqp[i].q.tqp_index = i; 393 kinfo->tqp[i] = &hdev->htqp[i].q; 394 } 395 396 /* after init the max rss_size and tqps, adjust the default tqp numbers 397 * and rss size with the actual vector numbers 398 */ 399 kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 400 kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, 401 kinfo->rss_size); 402 403 return 0; 404 } 405 406 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 407 { 408 struct hclge_vf_to_pf_msg send_msg; 409 int status; 410 411 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); 412 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 413 if (status) 414 dev_err(&hdev->pdev->dev, 415 "VF failed to fetch link status(%d) from PF", status); 416 } 417 418 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 419 { 420 struct hnae3_handle *rhandle = &hdev->roce; 421 struct hnae3_handle *handle = &hdev->nic; 422 struct hnae3_client *rclient; 423 struct hnae3_client *client; 424 425 if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 426 return; 427 428 client = handle->client; 429 rclient = hdev->roce_client; 430 431 link_state = 432 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 433 if (link_state != hdev->hw.mac.link) { 434 hdev->hw.mac.link = link_state; 435 client->ops->link_status_change(handle, !!link_state); 436 if (rclient && rclient->ops->link_status_change) 437 rclient->ops->link_status_change(rhandle, !!link_state); 438 } 439 440 clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 441 } 442 443 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 444 { 445 #define HCLGEVF_ADVERTISING 0 446 #define HCLGEVF_SUPPORTED 1 447 448 struct hclge_vf_to_pf_msg send_msg; 449 450 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 451 send_msg.data[0] = HCLGEVF_ADVERTISING; 452 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 453 send_msg.data[0] = HCLGEVF_SUPPORTED; 454 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 455 } 456 457 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 458 { 459 struct hnae3_handle *nic = &hdev->nic; 460 int ret; 461 462 nic->ae_algo = &ae_algovf; 463 nic->pdev = hdev->pdev; 464 nic->numa_node_mask = hdev->numa_node_mask; 465 nic->flags |= HNAE3_SUPPORT_VF; 466 nic->kinfo.io_base = hdev->hw.hw.io_base; 467 468 ret = hclgevf_knic_setup(hdev); 469 if (ret) 470 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 471 ret); 472 return ret; 473 } 474 475 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 476 { 477 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 478 dev_warn(&hdev->pdev->dev, 479 "vector(vector_id %d) has been freed.\n", vector_id); 480 return; 481 } 482 483 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 484 hdev->num_msi_left += 1; 485 hdev->num_msi_used -= 1; 486 } 487 488 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 489 struct hnae3_vector_info *vector_info) 490 { 491 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 492 struct hnae3_vector_info *vector = vector_info; 493 int alloc = 0; 494 int i, j; 495 496 vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 497 vector_num = min(hdev->num_msi_left, vector_num); 498 499 for (j = 0; j < vector_num; j++) { 500 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 501 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 502 vector->vector = pci_irq_vector(hdev->pdev, i); 503 vector->io_addr = hdev->hw.hw.io_base + 504 HCLGEVF_VECTOR_REG_BASE + 505 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 506 hdev->vector_status[i] = 0; 507 hdev->vector_irq[i] = vector->vector; 508 509 vector++; 510 alloc++; 511 512 break; 513 } 514 } 515 } 516 hdev->num_msi_left -= alloc; 517 hdev->num_msi_used += alloc; 518 519 return alloc; 520 } 521 522 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 523 { 524 int i; 525 526 for (i = 0; i < hdev->num_msi; i++) 527 if (vector == hdev->vector_irq[i]) 528 return i; 529 530 return -EINVAL; 531 } 532 533 /* for revision 0x20, vf shared the same rss config with pf */ 534 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 535 { 536 #define HCLGEVF_RSS_MBX_RESP_LEN 8 537 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 538 u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 539 struct hclge_vf_to_pf_msg send_msg; 540 u16 msg_num, hash_key_index; 541 u8 index; 542 int ret; 543 544 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 545 msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 546 HCLGEVF_RSS_MBX_RESP_LEN; 547 for (index = 0; index < msg_num; index++) { 548 send_msg.data[0] = index; 549 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 550 HCLGEVF_RSS_MBX_RESP_LEN); 551 if (ret) { 552 dev_err(&hdev->pdev->dev, 553 "VF get rss hash key from PF failed, ret=%d", 554 ret); 555 return ret; 556 } 557 558 hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 559 if (index == msg_num - 1) 560 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 561 &resp_msg[0], 562 HCLGE_COMM_RSS_KEY_SIZE - hash_key_index); 563 else 564 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 565 &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 566 } 567 568 return 0; 569 } 570 571 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 572 u8 *hfunc) 573 { 574 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 575 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 576 int ret; 577 578 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 579 hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); 580 } else { 581 if (hfunc) 582 *hfunc = ETH_RSS_HASH_TOP; 583 if (key) { 584 ret = hclgevf_get_rss_hash_key(hdev); 585 if (ret) 586 return ret; 587 memcpy(key, rss_cfg->rss_hash_key, 588 HCLGE_COMM_RSS_KEY_SIZE); 589 } 590 } 591 592 hclge_comm_get_rss_indir_tbl(rss_cfg, indir, 593 hdev->ae_dev->dev_specs.rss_ind_tbl_size); 594 595 return 0; 596 } 597 598 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 599 const u8 *key, const u8 hfunc) 600 { 601 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 602 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 603 int ret, i; 604 605 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 606 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, 607 hfunc); 608 if (ret) 609 return ret; 610 } 611 612 /* update the shadow RSS table with user specified qids */ 613 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 614 rss_cfg->rss_indirection_tbl[i] = indir[i]; 615 616 /* update the hardware */ 617 return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 618 rss_cfg->rss_indirection_tbl); 619 } 620 621 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 622 struct ethtool_rxnfc *nfc) 623 { 624 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 625 int ret; 626 627 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 628 return -EOPNOTSUPP; 629 630 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, 631 &hdev->rss_cfg, nfc); 632 if (ret) 633 dev_err(&hdev->pdev->dev, 634 "failed to set rss tuple, ret = %d.\n", ret); 635 636 return ret; 637 } 638 639 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 640 struct ethtool_rxnfc *nfc) 641 { 642 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 643 u8 tuple_sets; 644 int ret; 645 646 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 647 return -EOPNOTSUPP; 648 649 nfc->data = 0; 650 651 ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type, 652 &tuple_sets); 653 if (ret || !tuple_sets) 654 return ret; 655 656 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); 657 658 return 0; 659 } 660 661 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 662 { 663 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 664 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 665 666 return rss_cfg->rss_size; 667 } 668 669 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 670 int vector_id, 671 struct hnae3_ring_chain_node *ring_chain) 672 { 673 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 674 struct hclge_vf_to_pf_msg send_msg; 675 struct hnae3_ring_chain_node *node; 676 int status; 677 int i = 0; 678 679 memset(&send_msg, 0, sizeof(send_msg)); 680 send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 681 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 682 send_msg.vector_id = vector_id; 683 684 for (node = ring_chain; node; node = node->next) { 685 send_msg.param[i].ring_type = 686 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 687 688 send_msg.param[i].tqp_index = node->tqp_index; 689 send_msg.param[i].int_gl_index = 690 hnae3_get_field(node->int_gl_idx, 691 HNAE3_RING_GL_IDX_M, 692 HNAE3_RING_GL_IDX_S); 693 694 i++; 695 if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 696 send_msg.ring_num = i; 697 698 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 699 NULL, 0); 700 if (status) { 701 dev_err(&hdev->pdev->dev, 702 "Map TQP fail, status is %d.\n", 703 status); 704 return status; 705 } 706 i = 0; 707 } 708 } 709 710 return 0; 711 } 712 713 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 714 struct hnae3_ring_chain_node *ring_chain) 715 { 716 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 717 int vector_id; 718 719 vector_id = hclgevf_get_vector_index(hdev, vector); 720 if (vector_id < 0) { 721 dev_err(&handle->pdev->dev, 722 "Get vector index fail. ret =%d\n", vector_id); 723 return vector_id; 724 } 725 726 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 727 } 728 729 static int hclgevf_unmap_ring_from_vector( 730 struct hnae3_handle *handle, 731 int vector, 732 struct hnae3_ring_chain_node *ring_chain) 733 { 734 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 735 int ret, vector_id; 736 737 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 738 return 0; 739 740 vector_id = hclgevf_get_vector_index(hdev, vector); 741 if (vector_id < 0) { 742 dev_err(&handle->pdev->dev, 743 "Get vector index fail. ret =%d\n", vector_id); 744 return vector_id; 745 } 746 747 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 748 if (ret) 749 dev_err(&handle->pdev->dev, 750 "Unmap ring from vector fail. vector=%d, ret =%d\n", 751 vector_id, 752 ret); 753 754 return ret; 755 } 756 757 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 758 { 759 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 760 int vector_id; 761 762 vector_id = hclgevf_get_vector_index(hdev, vector); 763 if (vector_id < 0) { 764 dev_err(&handle->pdev->dev, 765 "hclgevf_put_vector get vector index fail. ret =%d\n", 766 vector_id); 767 return vector_id; 768 } 769 770 hclgevf_free_vector(hdev, vector_id); 771 772 return 0; 773 } 774 775 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 776 bool en_uc_pmc, bool en_mc_pmc, 777 bool en_bc_pmc) 778 { 779 struct hnae3_handle *handle = &hdev->nic; 780 struct hclge_vf_to_pf_msg send_msg; 781 int ret; 782 783 memset(&send_msg, 0, sizeof(send_msg)); 784 send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 785 send_msg.en_bc = en_bc_pmc ? 1 : 0; 786 send_msg.en_uc = en_uc_pmc ? 1 : 0; 787 send_msg.en_mc = en_mc_pmc ? 1 : 0; 788 send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC, 789 &handle->priv_flags) ? 1 : 0; 790 791 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 792 if (ret) 793 dev_err(&hdev->pdev->dev, 794 "Set promisc mode fail, status is %d.\n", ret); 795 796 return ret; 797 } 798 799 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 800 bool en_mc_pmc) 801 { 802 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 803 bool en_bc_pmc; 804 805 en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; 806 807 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 808 en_bc_pmc); 809 } 810 811 static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 812 { 813 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 814 815 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 816 hclgevf_task_schedule(hdev, 0); 817 } 818 819 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 820 { 821 struct hnae3_handle *handle = &hdev->nic; 822 bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 823 bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 824 int ret; 825 826 if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 827 ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 828 if (!ret) 829 clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 830 } 831 } 832 833 static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, 834 u16 stream_id, bool enable) 835 { 836 struct hclgevf_cfg_com_tqp_queue_cmd *req; 837 struct hclge_desc desc; 838 839 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 840 841 hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 842 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 843 req->stream_id = cpu_to_le16(stream_id); 844 if (enable) 845 req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 846 847 return hclgevf_cmd_send(&hdev->hw, &desc, 1); 848 } 849 850 static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable) 851 { 852 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 853 int ret; 854 u16 i; 855 856 for (i = 0; i < handle->kinfo.num_tqps; i++) { 857 ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable); 858 if (ret) 859 return ret; 860 } 861 862 return 0; 863 } 864 865 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 866 { 867 struct hclge_vf_to_pf_msg send_msg; 868 u8 host_mac[ETH_ALEN]; 869 int status; 870 871 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 872 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 873 ETH_ALEN); 874 if (status) { 875 dev_err(&hdev->pdev->dev, 876 "fail to get VF MAC from host %d", status); 877 return status; 878 } 879 880 ether_addr_copy(p, host_mac); 881 882 return 0; 883 } 884 885 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 886 { 887 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 888 u8 host_mac_addr[ETH_ALEN]; 889 890 if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 891 return; 892 893 hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 894 if (hdev->has_pf_mac) 895 ether_addr_copy(p, host_mac_addr); 896 else 897 ether_addr_copy(p, hdev->hw.mac.mac_addr); 898 } 899 900 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p, 901 bool is_first) 902 { 903 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 904 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 905 struct hclge_vf_to_pf_msg send_msg; 906 u8 *new_mac_addr = (u8 *)p; 907 int status; 908 909 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 910 send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; 911 ether_addr_copy(send_msg.data, new_mac_addr); 912 if (is_first && !hdev->has_pf_mac) 913 eth_zero_addr(&send_msg.data[ETH_ALEN]); 914 else 915 ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 916 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 917 if (!status) 918 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 919 920 return status; 921 } 922 923 static struct hclgevf_mac_addr_node * 924 hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 925 { 926 struct hclgevf_mac_addr_node *mac_node, *tmp; 927 928 list_for_each_entry_safe(mac_node, tmp, list, node) 929 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 930 return mac_node; 931 932 return NULL; 933 } 934 935 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 936 enum HCLGEVF_MAC_NODE_STATE state) 937 { 938 switch (state) { 939 /* from set_rx_mode or tmp_add_list */ 940 case HCLGEVF_MAC_TO_ADD: 941 if (mac_node->state == HCLGEVF_MAC_TO_DEL) 942 mac_node->state = HCLGEVF_MAC_ACTIVE; 943 break; 944 /* only from set_rx_mode */ 945 case HCLGEVF_MAC_TO_DEL: 946 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 947 list_del(&mac_node->node); 948 kfree(mac_node); 949 } else { 950 mac_node->state = HCLGEVF_MAC_TO_DEL; 951 } 952 break; 953 /* only from tmp_add_list, the mac_node->state won't be 954 * HCLGEVF_MAC_ACTIVE 955 */ 956 case HCLGEVF_MAC_ACTIVE: 957 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 958 mac_node->state = HCLGEVF_MAC_ACTIVE; 959 break; 960 } 961 } 962 963 static int hclgevf_update_mac_list(struct hnae3_handle *handle, 964 enum HCLGEVF_MAC_NODE_STATE state, 965 enum HCLGEVF_MAC_ADDR_TYPE mac_type, 966 const unsigned char *addr) 967 { 968 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 969 struct hclgevf_mac_addr_node *mac_node; 970 struct list_head *list; 971 972 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 973 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 974 975 spin_lock_bh(&hdev->mac_table.mac_list_lock); 976 977 /* if the mac addr is already in the mac list, no need to add a new 978 * one into it, just check the mac addr state, convert it to a new 979 * new state, or just remove it, or do nothing. 980 */ 981 mac_node = hclgevf_find_mac_node(list, addr); 982 if (mac_node) { 983 hclgevf_update_mac_node(mac_node, state); 984 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 985 return 0; 986 } 987 /* if this address is never added, unnecessary to delete */ 988 if (state == HCLGEVF_MAC_TO_DEL) { 989 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 990 return -ENOENT; 991 } 992 993 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 994 if (!mac_node) { 995 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 996 return -ENOMEM; 997 } 998 999 mac_node->state = state; 1000 ether_addr_copy(mac_node->mac_addr, addr); 1001 list_add_tail(&mac_node->node, list); 1002 1003 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1004 return 0; 1005 } 1006 1007 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1008 const unsigned char *addr) 1009 { 1010 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1011 HCLGEVF_MAC_ADDR_UC, addr); 1012 } 1013 1014 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1015 const unsigned char *addr) 1016 { 1017 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1018 HCLGEVF_MAC_ADDR_UC, addr); 1019 } 1020 1021 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1022 const unsigned char *addr) 1023 { 1024 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1025 HCLGEVF_MAC_ADDR_MC, addr); 1026 } 1027 1028 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1029 const unsigned char *addr) 1030 { 1031 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1032 HCLGEVF_MAC_ADDR_MC, addr); 1033 } 1034 1035 static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, 1036 struct hclgevf_mac_addr_node *mac_node, 1037 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1038 { 1039 struct hclge_vf_to_pf_msg send_msg; 1040 u8 code, subcode; 1041 1042 if (mac_type == HCLGEVF_MAC_ADDR_UC) { 1043 code = HCLGE_MBX_SET_UNICAST; 1044 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1045 subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; 1046 else 1047 subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; 1048 } else { 1049 code = HCLGE_MBX_SET_MULTICAST; 1050 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1051 subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; 1052 else 1053 subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; 1054 } 1055 1056 hclgevf_build_send_msg(&send_msg, code, subcode); 1057 ether_addr_copy(send_msg.data, mac_node->mac_addr); 1058 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1059 } 1060 1061 static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, 1062 struct list_head *list, 1063 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1064 { 1065 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 1066 struct hclgevf_mac_addr_node *mac_node, *tmp; 1067 int ret; 1068 1069 list_for_each_entry_safe(mac_node, tmp, list, node) { 1070 ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); 1071 if (ret) { 1072 hnae3_format_mac_addr(format_mac_addr, 1073 mac_node->mac_addr); 1074 dev_err(&hdev->pdev->dev, 1075 "failed to configure mac %s, state = %d, ret = %d\n", 1076 format_mac_addr, mac_node->state, ret); 1077 return; 1078 } 1079 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1080 mac_node->state = HCLGEVF_MAC_ACTIVE; 1081 } else { 1082 list_del(&mac_node->node); 1083 kfree(mac_node); 1084 } 1085 } 1086 } 1087 1088 static void hclgevf_sync_from_add_list(struct list_head *add_list, 1089 struct list_head *mac_list) 1090 { 1091 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1092 1093 list_for_each_entry_safe(mac_node, tmp, add_list, node) { 1094 /* if the mac address from tmp_add_list is not in the 1095 * uc/mc_mac_list, it means have received a TO_DEL request 1096 * during the time window of sending mac config request to PF 1097 * If mac_node state is ACTIVE, then change its state to TO_DEL, 1098 * then it will be removed at next time. If is TO_ADD, it means 1099 * send TO_ADD request failed, so just remove the mac node. 1100 */ 1101 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1102 if (new_node) { 1103 hclgevf_update_mac_node(new_node, mac_node->state); 1104 list_del(&mac_node->node); 1105 kfree(mac_node); 1106 } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { 1107 mac_node->state = HCLGEVF_MAC_TO_DEL; 1108 list_move_tail(&mac_node->node, mac_list); 1109 } else { 1110 list_del(&mac_node->node); 1111 kfree(mac_node); 1112 } 1113 } 1114 } 1115 1116 static void hclgevf_sync_from_del_list(struct list_head *del_list, 1117 struct list_head *mac_list) 1118 { 1119 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1120 1121 list_for_each_entry_safe(mac_node, tmp, del_list, node) { 1122 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1123 if (new_node) { 1124 /* If the mac addr is exist in the mac list, it means 1125 * received a new request TO_ADD during the time window 1126 * of sending mac addr configurrequest to PF, so just 1127 * change the mac state to ACTIVE. 1128 */ 1129 new_node->state = HCLGEVF_MAC_ACTIVE; 1130 list_del(&mac_node->node); 1131 kfree(mac_node); 1132 } else { 1133 list_move_tail(&mac_node->node, mac_list); 1134 } 1135 } 1136 } 1137 1138 static void hclgevf_clear_list(struct list_head *list) 1139 { 1140 struct hclgevf_mac_addr_node *mac_node, *tmp; 1141 1142 list_for_each_entry_safe(mac_node, tmp, list, node) { 1143 list_del(&mac_node->node); 1144 kfree(mac_node); 1145 } 1146 } 1147 1148 static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, 1149 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1150 { 1151 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1152 struct list_head tmp_add_list, tmp_del_list; 1153 struct list_head *list; 1154 1155 INIT_LIST_HEAD(&tmp_add_list); 1156 INIT_LIST_HEAD(&tmp_del_list); 1157 1158 /* move the mac addr to the tmp_add_list and tmp_del_list, then 1159 * we can add/delete these mac addr outside the spin lock 1160 */ 1161 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1162 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1163 1164 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1165 1166 list_for_each_entry_safe(mac_node, tmp, list, node) { 1167 switch (mac_node->state) { 1168 case HCLGEVF_MAC_TO_DEL: 1169 list_move_tail(&mac_node->node, &tmp_del_list); 1170 break; 1171 case HCLGEVF_MAC_TO_ADD: 1172 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 1173 if (!new_node) 1174 goto stop_traverse; 1175 1176 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 1177 new_node->state = mac_node->state; 1178 list_add_tail(&new_node->node, &tmp_add_list); 1179 break; 1180 default: 1181 break; 1182 } 1183 } 1184 1185 stop_traverse: 1186 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1187 1188 /* delete first, in order to get max mac table space for adding */ 1189 hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); 1190 hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); 1191 1192 /* if some mac addresses were added/deleted fail, move back to the 1193 * mac_list, and retry at next time. 1194 */ 1195 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1196 1197 hclgevf_sync_from_del_list(&tmp_del_list, list); 1198 hclgevf_sync_from_add_list(&tmp_add_list, list); 1199 1200 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1201 } 1202 1203 static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) 1204 { 1205 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); 1206 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); 1207 } 1208 1209 static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) 1210 { 1211 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1212 1213 hclgevf_clear_list(&hdev->mac_table.uc_mac_list); 1214 hclgevf_clear_list(&hdev->mac_table.mc_mac_list); 1215 1216 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1217 } 1218 1219 static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 1220 { 1221 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1222 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 1223 struct hclge_vf_to_pf_msg send_msg; 1224 1225 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) 1226 return -EOPNOTSUPP; 1227 1228 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1229 HCLGE_MBX_ENABLE_VLAN_FILTER); 1230 send_msg.data[0] = enable ? 1 : 0; 1231 1232 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1233 } 1234 1235 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1236 __be16 proto, u16 vlan_id, 1237 bool is_kill) 1238 { 1239 #define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 1240 #define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 1241 #define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 1242 1243 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1244 struct hclge_vf_to_pf_msg send_msg; 1245 int ret; 1246 1247 if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1248 return -EINVAL; 1249 1250 if (proto != htons(ETH_P_8021Q)) 1251 return -EPROTONOSUPPORT; 1252 1253 /* When device is resetting or reset failed, firmware is unable to 1254 * handle mailbox. Just record the vlan id, and remove it after 1255 * reset finished. 1256 */ 1257 if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 1258 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { 1259 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1260 return -EBUSY; 1261 } 1262 1263 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1264 HCLGE_MBX_VLAN_FILTER); 1265 send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; 1266 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, 1267 sizeof(vlan_id)); 1268 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, 1269 sizeof(proto)); 1270 /* when remove hw vlan filter failed, record the vlan id, 1271 * and try to remove it from hw later, to be consistence 1272 * with stack. 1273 */ 1274 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1275 if (is_kill && ret) 1276 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1277 1278 return ret; 1279 } 1280 1281 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1282 { 1283 #define HCLGEVF_MAX_SYNC_COUNT 60 1284 struct hnae3_handle *handle = &hdev->nic; 1285 int ret, sync_cnt = 0; 1286 u16 vlan_id; 1287 1288 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1289 while (vlan_id != VLAN_N_VID) { 1290 ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1291 vlan_id, true); 1292 if (ret) 1293 return; 1294 1295 clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1296 sync_cnt++; 1297 if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1298 return; 1299 1300 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1301 } 1302 } 1303 1304 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1305 { 1306 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1307 struct hclge_vf_to_pf_msg send_msg; 1308 1309 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1310 HCLGE_MBX_VLAN_RX_OFF_CFG); 1311 send_msg.data[0] = enable ? 1 : 0; 1312 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1313 } 1314 1315 static int hclgevf_reset_tqp(struct hnae3_handle *handle) 1316 { 1317 #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U 1318 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1319 struct hclge_vf_to_pf_msg send_msg; 1320 u8 return_status = 0; 1321 int ret; 1322 u16 i; 1323 1324 /* disable vf queue before send queue reset msg to PF */ 1325 ret = hclgevf_tqp_enable(handle, false); 1326 if (ret) { 1327 dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n", 1328 ret); 1329 return ret; 1330 } 1331 1332 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1333 1334 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status, 1335 sizeof(return_status)); 1336 if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE) 1337 return ret; 1338 1339 for (i = 1; i < handle->kinfo.num_tqps; i++) { 1340 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1341 memcpy(send_msg.data, &i, sizeof(i)); 1342 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1343 if (ret) 1344 return ret; 1345 } 1346 1347 return 0; 1348 } 1349 1350 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1351 { 1352 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1353 struct hclge_vf_to_pf_msg send_msg; 1354 1355 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); 1356 memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); 1357 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1358 } 1359 1360 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1361 enum hnae3_reset_notify_type type) 1362 { 1363 struct hnae3_client *client = hdev->nic_client; 1364 struct hnae3_handle *handle = &hdev->nic; 1365 int ret; 1366 1367 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 1368 !client) 1369 return 0; 1370 1371 if (!client->ops->reset_notify) 1372 return -EOPNOTSUPP; 1373 1374 ret = client->ops->reset_notify(handle, type); 1375 if (ret) 1376 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1377 type, ret); 1378 1379 return ret; 1380 } 1381 1382 static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, 1383 enum hnae3_reset_notify_type type) 1384 { 1385 struct hnae3_client *client = hdev->roce_client; 1386 struct hnae3_handle *handle = &hdev->roce; 1387 int ret; 1388 1389 if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) 1390 return 0; 1391 1392 if (!client->ops->reset_notify) 1393 return -EOPNOTSUPP; 1394 1395 ret = client->ops->reset_notify(handle, type); 1396 if (ret) 1397 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 1398 type, ret); 1399 return ret; 1400 } 1401 1402 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1403 { 1404 #define HCLGEVF_RESET_WAIT_US 20000 1405 #define HCLGEVF_RESET_WAIT_CNT 2000 1406 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1407 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1408 1409 u32 val; 1410 int ret; 1411 1412 if (hdev->reset_type == HNAE3_VF_RESET) 1413 ret = readl_poll_timeout(hdev->hw.hw.io_base + 1414 HCLGEVF_VF_RST_ING, val, 1415 !(val & HCLGEVF_VF_RST_ING_BIT), 1416 HCLGEVF_RESET_WAIT_US, 1417 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1418 else 1419 ret = readl_poll_timeout(hdev->hw.hw.io_base + 1420 HCLGEVF_RST_ING, val, 1421 !(val & HCLGEVF_RST_ING_BITS), 1422 HCLGEVF_RESET_WAIT_US, 1423 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1424 1425 /* hardware completion status should be available by this time */ 1426 if (ret) { 1427 dev_err(&hdev->pdev->dev, 1428 "couldn't get reset done status from h/w, timeout!\n"); 1429 return ret; 1430 } 1431 1432 /* we will wait a bit more to let reset of the stack to complete. This 1433 * might happen in case reset assertion was made by PF. Yes, this also 1434 * means we might end up waiting bit more even for VF reset. 1435 */ 1436 msleep(5000); 1437 1438 return 0; 1439 } 1440 1441 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 1442 { 1443 u32 reg_val; 1444 1445 reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); 1446 if (enable) 1447 reg_val |= HCLGEVF_NIC_SW_RST_RDY; 1448 else 1449 reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 1450 1451 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 1452 reg_val); 1453 } 1454 1455 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1456 { 1457 int ret; 1458 1459 /* uninitialize the nic client */ 1460 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1461 if (ret) 1462 return ret; 1463 1464 /* re-initialize the hclge device */ 1465 ret = hclgevf_reset_hdev(hdev); 1466 if (ret) { 1467 dev_err(&hdev->pdev->dev, 1468 "hclge device re-init failed, VF is disabled!\n"); 1469 return ret; 1470 } 1471 1472 /* bring up the nic client again */ 1473 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1474 if (ret) 1475 return ret; 1476 1477 /* clear handshake status with IMP */ 1478 hclgevf_reset_handshake(hdev, false); 1479 1480 /* bring up the nic to enable TX/RX again */ 1481 return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1482 } 1483 1484 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1485 { 1486 #define HCLGEVF_RESET_SYNC_TIME 100 1487 1488 if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1489 struct hclge_vf_to_pf_msg send_msg; 1490 int ret; 1491 1492 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); 1493 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1494 if (ret) { 1495 dev_err(&hdev->pdev->dev, 1496 "failed to assert VF reset, ret = %d\n", ret); 1497 return ret; 1498 } 1499 hdev->rst_stats.vf_func_rst_cnt++; 1500 } 1501 1502 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 1503 /* inform hardware that preparatory work is done */ 1504 msleep(HCLGEVF_RESET_SYNC_TIME); 1505 hclgevf_reset_handshake(hdev, true); 1506 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", 1507 hdev->reset_type); 1508 1509 return 0; 1510 } 1511 1512 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 1513 { 1514 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 1515 hdev->rst_stats.vf_func_rst_cnt); 1516 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1517 hdev->rst_stats.flr_rst_cnt); 1518 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 1519 hdev->rst_stats.vf_rst_cnt); 1520 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1521 hdev->rst_stats.rst_done_cnt); 1522 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1523 hdev->rst_stats.hw_rst_done_cnt); 1524 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1525 hdev->rst_stats.rst_cnt); 1526 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1527 hdev->rst_stats.rst_fail_cnt); 1528 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1529 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 1530 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 1531 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG)); 1532 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1533 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG)); 1534 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1535 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 1536 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1537 } 1538 1539 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1540 { 1541 /* recover handshake status with IMP when reset fail */ 1542 hclgevf_reset_handshake(hdev, true); 1543 hdev->rst_stats.rst_fail_cnt++; 1544 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1545 hdev->rst_stats.rst_fail_cnt); 1546 1547 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1548 set_bit(hdev->reset_type, &hdev->reset_pending); 1549 1550 if (hclgevf_is_reset_pending(hdev)) { 1551 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1552 hclgevf_reset_task_schedule(hdev); 1553 } else { 1554 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1555 hclgevf_dump_rst_info(hdev); 1556 } 1557 } 1558 1559 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 1560 { 1561 int ret; 1562 1563 hdev->rst_stats.rst_cnt++; 1564 1565 /* perform reset of the stack & ae device for a client */ 1566 ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 1567 if (ret) 1568 return ret; 1569 1570 rtnl_lock(); 1571 /* bring down the nic to stop any ongoing TX/RX */ 1572 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1573 rtnl_unlock(); 1574 if (ret) 1575 return ret; 1576 1577 return hclgevf_reset_prepare_wait(hdev); 1578 } 1579 1580 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 1581 { 1582 int ret; 1583 1584 hdev->rst_stats.hw_rst_done_cnt++; 1585 ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 1586 if (ret) 1587 return ret; 1588 1589 rtnl_lock(); 1590 /* now, re-initialize the nic client and ae device */ 1591 ret = hclgevf_reset_stack(hdev); 1592 rtnl_unlock(); 1593 if (ret) { 1594 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1595 return ret; 1596 } 1597 1598 ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 1599 /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1 1600 * times 1601 */ 1602 if (ret && 1603 hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) 1604 return ret; 1605 1606 ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); 1607 if (ret) 1608 return ret; 1609 1610 hdev->last_reset_time = jiffies; 1611 hdev->rst_stats.rst_done_cnt++; 1612 hdev->rst_stats.rst_fail_cnt = 0; 1613 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1614 1615 return 0; 1616 } 1617 1618 static void hclgevf_reset(struct hclgevf_dev *hdev) 1619 { 1620 if (hclgevf_reset_prepare(hdev)) 1621 goto err_reset; 1622 1623 /* check if VF could successfully fetch the hardware reset completion 1624 * status from the hardware 1625 */ 1626 if (hclgevf_reset_wait(hdev)) { 1627 /* can't do much in this situation, will disable VF */ 1628 dev_err(&hdev->pdev->dev, 1629 "failed to fetch H/W reset completion status\n"); 1630 goto err_reset; 1631 } 1632 1633 if (hclgevf_reset_rebuild(hdev)) 1634 goto err_reset; 1635 1636 return; 1637 1638 err_reset: 1639 hclgevf_reset_err_handle(hdev); 1640 } 1641 1642 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1643 unsigned long *addr) 1644 { 1645 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1646 1647 /* return the highest priority reset level amongst all */ 1648 if (test_bit(HNAE3_VF_RESET, addr)) { 1649 rst_level = HNAE3_VF_RESET; 1650 clear_bit(HNAE3_VF_RESET, addr); 1651 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1652 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1653 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1654 rst_level = HNAE3_VF_FULL_RESET; 1655 clear_bit(HNAE3_VF_FULL_RESET, addr); 1656 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1657 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1658 rst_level = HNAE3_VF_PF_FUNC_RESET; 1659 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1660 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1661 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1662 rst_level = HNAE3_VF_FUNC_RESET; 1663 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1664 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 1665 rst_level = HNAE3_FLR_RESET; 1666 clear_bit(HNAE3_FLR_RESET, addr); 1667 } 1668 1669 return rst_level; 1670 } 1671 1672 static void hclgevf_reset_event(struct pci_dev *pdev, 1673 struct hnae3_handle *handle) 1674 { 1675 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1676 struct hclgevf_dev *hdev = ae_dev->priv; 1677 1678 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 1679 1680 if (hdev->default_reset_request) 1681 hdev->reset_level = 1682 hclgevf_get_reset_level(hdev, 1683 &hdev->default_reset_request); 1684 else 1685 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1686 1687 /* reset of this VF requested */ 1688 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1689 hclgevf_reset_task_schedule(hdev); 1690 1691 hdev->last_reset_time = jiffies; 1692 } 1693 1694 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1695 enum hnae3_reset_type rst_type) 1696 { 1697 struct hclgevf_dev *hdev = ae_dev->priv; 1698 1699 set_bit(rst_type, &hdev->default_reset_request); 1700 } 1701 1702 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1703 { 1704 writel(en ? 1 : 0, vector->addr); 1705 } 1706 1707 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 1708 enum hnae3_reset_type rst_type) 1709 { 1710 #define HCLGEVF_RESET_RETRY_WAIT_MS 500 1711 #define HCLGEVF_RESET_RETRY_CNT 5 1712 1713 struct hclgevf_dev *hdev = ae_dev->priv; 1714 int retry_cnt = 0; 1715 int ret; 1716 1717 while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { 1718 down(&hdev->reset_sem); 1719 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1720 hdev->reset_type = rst_type; 1721 ret = hclgevf_reset_prepare(hdev); 1722 if (!ret && !hdev->reset_pending) 1723 break; 1724 1725 dev_err(&hdev->pdev->dev, 1726 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", 1727 ret, hdev->reset_pending, retry_cnt); 1728 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1729 up(&hdev->reset_sem); 1730 msleep(HCLGEVF_RESET_RETRY_WAIT_MS); 1731 } 1732 1733 /* disable misc vector before reset done */ 1734 hclgevf_enable_vector(&hdev->misc_vector, false); 1735 1736 if (hdev->reset_type == HNAE3_FLR_RESET) 1737 hdev->rst_stats.flr_rst_cnt++; 1738 } 1739 1740 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) 1741 { 1742 struct hclgevf_dev *hdev = ae_dev->priv; 1743 int ret; 1744 1745 hclgevf_enable_vector(&hdev->misc_vector, true); 1746 1747 ret = hclgevf_reset_rebuild(hdev); 1748 if (ret) 1749 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 1750 ret); 1751 1752 hdev->reset_type = HNAE3_NONE_RESET; 1753 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1754 up(&hdev->reset_sem); 1755 } 1756 1757 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1758 { 1759 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1760 1761 return hdev->fw_version; 1762 } 1763 1764 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1765 { 1766 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1767 1768 vector->vector_irq = pci_irq_vector(hdev->pdev, 1769 HCLGEVF_MISC_VECTOR_NUM); 1770 vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1771 /* vector status always valid for Vector 0 */ 1772 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1773 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1774 1775 hdev->num_msi_left -= 1; 1776 hdev->num_msi_used += 1; 1777 } 1778 1779 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1780 { 1781 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 1782 test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && 1783 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 1784 &hdev->state)) 1785 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 1786 } 1787 1788 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1789 { 1790 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 1791 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 1792 &hdev->state)) 1793 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 1794 } 1795 1796 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 1797 unsigned long delay) 1798 { 1799 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 1800 !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 1801 mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); 1802 } 1803 1804 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) 1805 { 1806 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 1807 1808 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) 1809 return; 1810 1811 down(&hdev->reset_sem); 1812 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1813 1814 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 1815 &hdev->reset_state)) { 1816 /* PF has intimated that it is about to reset the hardware. 1817 * We now have to poll & check if hardware has actually 1818 * completed the reset sequence. On hardware reset completion, 1819 * VF needs to reset the client and ae device. 1820 */ 1821 hdev->reset_attempts = 0; 1822 1823 hdev->last_reset_time = jiffies; 1824 hdev->reset_type = 1825 hclgevf_get_reset_level(hdev, &hdev->reset_pending); 1826 if (hdev->reset_type != HNAE3_NONE_RESET) 1827 hclgevf_reset(hdev); 1828 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1829 &hdev->reset_state)) { 1830 /* we could be here when either of below happens: 1831 * 1. reset was initiated due to watchdog timeout caused by 1832 * a. IMP was earlier reset and our TX got choked down and 1833 * which resulted in watchdog reacting and inducing VF 1834 * reset. This also means our cmdq would be unreliable. 1835 * b. problem in TX due to other lower layer(example link 1836 * layer not functioning properly etc.) 1837 * 2. VF reset might have been initiated due to some config 1838 * change. 1839 * 1840 * NOTE: Theres no clear way to detect above cases than to react 1841 * to the response of PF for this reset request. PF will ack the 1842 * 1b and 2. cases but we will not get any intimation about 1a 1843 * from PF as cmdq would be in unreliable state i.e. mailbox 1844 * communication between PF and VF would be broken. 1845 * 1846 * if we are never geting into pending state it means either: 1847 * 1. PF is not receiving our request which could be due to IMP 1848 * reset 1849 * 2. PF is screwed 1850 * We cannot do much for 2. but to check first we can try reset 1851 * our PCIe + stack and see if it alleviates the problem. 1852 */ 1853 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 1854 /* prepare for full reset of stack + pcie interface */ 1855 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 1856 1857 /* "defer" schedule the reset task again */ 1858 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1859 } else { 1860 hdev->reset_attempts++; 1861 1862 set_bit(hdev->reset_level, &hdev->reset_pending); 1863 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1864 } 1865 hclgevf_reset_task_schedule(hdev); 1866 } 1867 1868 hdev->reset_type = HNAE3_NONE_RESET; 1869 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1870 up(&hdev->reset_sem); 1871 } 1872 1873 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 1874 { 1875 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 1876 return; 1877 1878 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1879 return; 1880 1881 hclgevf_mbx_async_handler(hdev); 1882 1883 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1884 } 1885 1886 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 1887 { 1888 struct hclge_vf_to_pf_msg send_msg; 1889 int ret; 1890 1891 if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) 1892 return; 1893 1894 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 1895 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1896 if (ret) 1897 dev_err(&hdev->pdev->dev, 1898 "VF sends keep alive cmd failed(=%d)\n", ret); 1899 } 1900 1901 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 1902 { 1903 unsigned long delta = round_jiffies_relative(HZ); 1904 struct hnae3_handle *handle = &hdev->nic; 1905 1906 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 1907 return; 1908 1909 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 1910 delta = jiffies - hdev->last_serv_processed; 1911 1912 if (delta < round_jiffies_relative(HZ)) { 1913 delta = round_jiffies_relative(HZ) - delta; 1914 goto out; 1915 } 1916 } 1917 1918 hdev->serv_processed_cnt++; 1919 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 1920 hclgevf_keep_alive(hdev); 1921 1922 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 1923 hdev->last_serv_processed = jiffies; 1924 goto out; 1925 } 1926 1927 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 1928 hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 1929 1930 /* VF does not need to request link status when this bit is set, because 1931 * PF will push its link status to VFs when link status changed. 1932 */ 1933 if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) 1934 hclgevf_request_link_info(hdev); 1935 1936 hclgevf_update_link_mode(hdev); 1937 1938 hclgevf_sync_vlan_filter(hdev); 1939 1940 hclgevf_sync_mac_table(hdev); 1941 1942 hclgevf_sync_promisc_mode(hdev); 1943 1944 hdev->last_serv_processed = jiffies; 1945 1946 out: 1947 hclgevf_task_schedule(hdev, delta); 1948 } 1949 1950 static void hclgevf_service_task(struct work_struct *work) 1951 { 1952 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 1953 service_task.work); 1954 1955 hclgevf_reset_service_task(hdev); 1956 hclgevf_mailbox_service_task(hdev); 1957 hclgevf_periodic_service_task(hdev); 1958 1959 /* Handle reset and mbx again in case periodical task delays the 1960 * handling by calling hclgevf_task_schedule() in 1961 * hclgevf_periodic_service_task() 1962 */ 1963 hclgevf_reset_service_task(hdev); 1964 hclgevf_mailbox_service_task(hdev); 1965 } 1966 1967 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 1968 { 1969 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr); 1970 } 1971 1972 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 1973 u32 *clearval) 1974 { 1975 u32 val, cmdq_stat_reg, rst_ing_reg; 1976 1977 /* fetch the events from their corresponding regs */ 1978 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 1979 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG); 1980 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 1981 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1982 dev_info(&hdev->pdev->dev, 1983 "receive reset interrupt 0x%x!\n", rst_ing_reg); 1984 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 1985 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1986 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 1987 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 1988 hdev->rst_stats.vf_rst_cnt++; 1989 /* set up VF hardware reset status, its PF will clear 1990 * this status when PF has initialized done. 1991 */ 1992 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 1993 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 1994 val | HCLGEVF_VF_RST_ING_BIT); 1995 return HCLGEVF_VECTOR0_EVENT_RST; 1996 } 1997 1998 /* check for vector0 mailbox(=CMDQ RX) event source */ 1999 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 2000 /* for revision 0x21, clearing interrupt is writing bit 0 2001 * to the clear register, writing bit 1 means to keep the 2002 * old value. 2003 * for revision 0x20, the clear register is a read & write 2004 * register, so we should just write 0 to the bit we are 2005 * handling, and keep other bits as cmdq_stat_reg. 2006 */ 2007 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2008 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2009 else 2010 *clearval = cmdq_stat_reg & 2011 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2012 2013 return HCLGEVF_VECTOR0_EVENT_MBX; 2014 } 2015 2016 /* print other vector0 event source */ 2017 dev_info(&hdev->pdev->dev, 2018 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2019 cmdq_stat_reg); 2020 2021 return HCLGEVF_VECTOR0_EVENT_OTHER; 2022 } 2023 2024 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2025 { 2026 enum hclgevf_evt_cause event_cause; 2027 struct hclgevf_dev *hdev = data; 2028 u32 clearval; 2029 2030 hclgevf_enable_vector(&hdev->misc_vector, false); 2031 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2032 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2033 hclgevf_clear_event_cause(hdev, clearval); 2034 2035 switch (event_cause) { 2036 case HCLGEVF_VECTOR0_EVENT_RST: 2037 hclgevf_reset_task_schedule(hdev); 2038 break; 2039 case HCLGEVF_VECTOR0_EVENT_MBX: 2040 hclgevf_mbx_handler(hdev); 2041 break; 2042 default: 2043 break; 2044 } 2045 2046 hclgevf_enable_vector(&hdev->misc_vector, true); 2047 2048 return IRQ_HANDLED; 2049 } 2050 2051 static int hclgevf_configure(struct hclgevf_dev *hdev) 2052 { 2053 int ret; 2054 2055 hdev->gro_en = true; 2056 2057 ret = hclgevf_get_basic_info(hdev); 2058 if (ret) 2059 return ret; 2060 2061 /* get current port based vlan state from PF */ 2062 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2063 if (ret) 2064 return ret; 2065 2066 /* get queue configuration from PF */ 2067 ret = hclgevf_get_queue_info(hdev); 2068 if (ret) 2069 return ret; 2070 2071 /* get queue depth info from PF */ 2072 ret = hclgevf_get_queue_depth(hdev); 2073 if (ret) 2074 return ret; 2075 2076 return hclgevf_get_pf_media_type(hdev); 2077 } 2078 2079 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2080 { 2081 struct pci_dev *pdev = ae_dev->pdev; 2082 struct hclgevf_dev *hdev; 2083 2084 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2085 if (!hdev) 2086 return -ENOMEM; 2087 2088 hdev->pdev = pdev; 2089 hdev->ae_dev = ae_dev; 2090 ae_dev->priv = hdev; 2091 2092 return 0; 2093 } 2094 2095 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2096 { 2097 struct hnae3_handle *roce = &hdev->roce; 2098 struct hnae3_handle *nic = &hdev->nic; 2099 2100 roce->rinfo.num_vectors = hdev->num_roce_msix; 2101 2102 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2103 hdev->num_msi_left == 0) 2104 return -EINVAL; 2105 2106 roce->rinfo.base_vector = hdev->roce_base_msix_offset; 2107 2108 roce->rinfo.netdev = nic->kinfo.netdev; 2109 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; 2110 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; 2111 2112 roce->pdev = nic->pdev; 2113 roce->ae_algo = nic->ae_algo; 2114 roce->numa_node_mask = nic->numa_node_mask; 2115 2116 return 0; 2117 } 2118 2119 static int hclgevf_config_gro(struct hclgevf_dev *hdev) 2120 { 2121 struct hclgevf_cfg_gro_status_cmd *req; 2122 struct hclge_desc desc; 2123 int ret; 2124 2125 if (!hnae3_dev_gro_supported(hdev)) 2126 return 0; 2127 2128 hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, 2129 false); 2130 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2131 2132 req->gro_en = hdev->gro_en ? 1 : 0; 2133 2134 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2135 if (ret) 2136 dev_err(&hdev->pdev->dev, 2137 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2138 2139 return ret; 2140 } 2141 2142 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2143 { 2144 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 2145 u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; 2146 u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; 2147 u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; 2148 int ret; 2149 2150 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2151 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, 2152 rss_cfg->rss_algo, 2153 rss_cfg->rss_hash_key); 2154 if (ret) 2155 return ret; 2156 2157 ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw, 2158 false, rss_cfg); 2159 if (ret) 2160 return ret; 2161 } 2162 2163 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 2164 rss_cfg->rss_indirection_tbl); 2165 if (ret) 2166 return ret; 2167 2168 hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map, 2169 tc_offset, tc_valid, tc_size); 2170 2171 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, 2172 tc_valid, tc_size); 2173 } 2174 2175 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2176 { 2177 struct hnae3_handle *nic = &hdev->nic; 2178 int ret; 2179 2180 ret = hclgevf_en_hw_strip_rxvtag(nic, true); 2181 if (ret) { 2182 dev_err(&hdev->pdev->dev, 2183 "failed to enable rx vlan offload, ret = %d\n", ret); 2184 return ret; 2185 } 2186 2187 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2188 false); 2189 } 2190 2191 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2192 { 2193 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2194 2195 unsigned long last = hdev->serv_processed_cnt; 2196 int i = 0; 2197 2198 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2199 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2200 last == hdev->serv_processed_cnt) 2201 usleep_range(1, 1); 2202 } 2203 2204 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2205 { 2206 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2207 2208 if (enable) { 2209 hclgevf_task_schedule(hdev, 0); 2210 } else { 2211 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2212 2213 /* flush memory to make sure DOWN is seen by service task */ 2214 smp_mb__before_atomic(); 2215 hclgevf_flush_link_update(hdev); 2216 } 2217 } 2218 2219 static int hclgevf_ae_start(struct hnae3_handle *handle) 2220 { 2221 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2222 2223 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2224 clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); 2225 2226 hclge_comm_reset_tqp_stats(handle); 2227 2228 hclgevf_request_link_info(hdev); 2229 2230 hclgevf_update_link_mode(hdev); 2231 2232 return 0; 2233 } 2234 2235 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2236 { 2237 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2238 2239 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2240 2241 if (hdev->reset_type != HNAE3_VF_RESET) 2242 hclgevf_reset_tqp(handle); 2243 2244 hclge_comm_reset_tqp_stats(handle); 2245 hclgevf_update_link_status(hdev, 0); 2246 } 2247 2248 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2249 { 2250 #define HCLGEVF_STATE_ALIVE 1 2251 #define HCLGEVF_STATE_NOT_ALIVE 0 2252 2253 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2254 struct hclge_vf_to_pf_msg send_msg; 2255 2256 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2257 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2258 HCLGEVF_STATE_NOT_ALIVE; 2259 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2260 } 2261 2262 static int hclgevf_client_start(struct hnae3_handle *handle) 2263 { 2264 return hclgevf_set_alive(handle, true); 2265 } 2266 2267 static void hclgevf_client_stop(struct hnae3_handle *handle) 2268 { 2269 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2270 int ret; 2271 2272 ret = hclgevf_set_alive(handle, false); 2273 if (ret) 2274 dev_warn(&hdev->pdev->dev, 2275 "%s failed %d\n", __func__, ret); 2276 } 2277 2278 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2279 { 2280 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2281 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2282 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2283 2284 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2285 2286 mutex_init(&hdev->mbx_resp.mbx_mutex); 2287 sema_init(&hdev->reset_sem, 1); 2288 2289 spin_lock_init(&hdev->mac_table.mac_list_lock); 2290 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2291 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2292 2293 /* bring the device down */ 2294 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2295 } 2296 2297 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2298 { 2299 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2300 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2301 2302 if (hdev->service_task.work.func) 2303 cancel_delayed_work_sync(&hdev->service_task); 2304 2305 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2306 } 2307 2308 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2309 { 2310 struct pci_dev *pdev = hdev->pdev; 2311 int vectors; 2312 int i; 2313 2314 if (hnae3_dev_roce_supported(hdev)) 2315 vectors = pci_alloc_irq_vectors(pdev, 2316 hdev->roce_base_msix_offset + 1, 2317 hdev->num_msi, 2318 PCI_IRQ_MSIX); 2319 else 2320 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2321 hdev->num_msi, 2322 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2323 2324 if (vectors < 0) { 2325 dev_err(&pdev->dev, 2326 "failed(%d) to allocate MSI/MSI-X vectors\n", 2327 vectors); 2328 return vectors; 2329 } 2330 if (vectors < hdev->num_msi) 2331 dev_warn(&hdev->pdev->dev, 2332 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2333 hdev->num_msi, vectors); 2334 2335 hdev->num_msi = vectors; 2336 hdev->num_msi_left = vectors; 2337 2338 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2339 sizeof(u16), GFP_KERNEL); 2340 if (!hdev->vector_status) { 2341 pci_free_irq_vectors(pdev); 2342 return -ENOMEM; 2343 } 2344 2345 for (i = 0; i < hdev->num_msi; i++) 2346 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2347 2348 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2349 sizeof(int), GFP_KERNEL); 2350 if (!hdev->vector_irq) { 2351 devm_kfree(&pdev->dev, hdev->vector_status); 2352 pci_free_irq_vectors(pdev); 2353 return -ENOMEM; 2354 } 2355 2356 return 0; 2357 } 2358 2359 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2360 { 2361 struct pci_dev *pdev = hdev->pdev; 2362 2363 devm_kfree(&pdev->dev, hdev->vector_status); 2364 devm_kfree(&pdev->dev, hdev->vector_irq); 2365 pci_free_irq_vectors(pdev); 2366 } 2367 2368 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2369 { 2370 int ret; 2371 2372 hclgevf_get_misc_vector(hdev); 2373 2374 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2375 HCLGEVF_NAME, pci_name(hdev->pdev)); 2376 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2377 0, hdev->misc_vector.name, hdev); 2378 if (ret) { 2379 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2380 hdev->misc_vector.vector_irq); 2381 return ret; 2382 } 2383 2384 hclgevf_clear_event_cause(hdev, 0); 2385 2386 /* enable misc. vector(vector 0) */ 2387 hclgevf_enable_vector(&hdev->misc_vector, true); 2388 2389 return ret; 2390 } 2391 2392 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2393 { 2394 /* disable misc vector(vector 0) */ 2395 hclgevf_enable_vector(&hdev->misc_vector, false); 2396 synchronize_irq(hdev->misc_vector.vector_irq); 2397 free_irq(hdev->misc_vector.vector_irq, hdev); 2398 hclgevf_free_vector(hdev, 0); 2399 } 2400 2401 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2402 { 2403 struct device *dev = &hdev->pdev->dev; 2404 2405 dev_info(dev, "VF info begin:\n"); 2406 2407 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2408 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2409 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2410 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2411 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2412 dev_info(dev, "PF media type of this VF: %u\n", 2413 hdev->hw.mac.media_type); 2414 2415 dev_info(dev, "VF info end.\n"); 2416 } 2417 2418 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2419 struct hnae3_client *client) 2420 { 2421 struct hclgevf_dev *hdev = ae_dev->priv; 2422 int rst_cnt = hdev->rst_stats.rst_cnt; 2423 int ret; 2424 2425 ret = client->ops->init_instance(&hdev->nic); 2426 if (ret) 2427 return ret; 2428 2429 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2430 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2431 rst_cnt != hdev->rst_stats.rst_cnt) { 2432 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2433 2434 client->ops->uninit_instance(&hdev->nic, 0); 2435 return -EBUSY; 2436 } 2437 2438 hnae3_set_client_init_flag(client, ae_dev, 1); 2439 2440 if (netif_msg_drv(&hdev->nic)) 2441 hclgevf_info_show(hdev); 2442 2443 return 0; 2444 } 2445 2446 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2447 struct hnae3_client *client) 2448 { 2449 struct hclgevf_dev *hdev = ae_dev->priv; 2450 int ret; 2451 2452 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2453 !hdev->nic_client) 2454 return 0; 2455 2456 ret = hclgevf_init_roce_base_info(hdev); 2457 if (ret) 2458 return ret; 2459 2460 ret = client->ops->init_instance(&hdev->roce); 2461 if (ret) 2462 return ret; 2463 2464 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2465 hnae3_set_client_init_flag(client, ae_dev, 1); 2466 2467 return 0; 2468 } 2469 2470 static int hclgevf_init_client_instance(struct hnae3_client *client, 2471 struct hnae3_ae_dev *ae_dev) 2472 { 2473 struct hclgevf_dev *hdev = ae_dev->priv; 2474 int ret; 2475 2476 switch (client->type) { 2477 case HNAE3_CLIENT_KNIC: 2478 hdev->nic_client = client; 2479 hdev->nic.client = client; 2480 2481 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2482 if (ret) 2483 goto clear_nic; 2484 2485 ret = hclgevf_init_roce_client_instance(ae_dev, 2486 hdev->roce_client); 2487 if (ret) 2488 goto clear_roce; 2489 2490 break; 2491 case HNAE3_CLIENT_ROCE: 2492 if (hnae3_dev_roce_supported(hdev)) { 2493 hdev->roce_client = client; 2494 hdev->roce.client = client; 2495 } 2496 2497 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2498 if (ret) 2499 goto clear_roce; 2500 2501 break; 2502 default: 2503 return -EINVAL; 2504 } 2505 2506 return 0; 2507 2508 clear_nic: 2509 hdev->nic_client = NULL; 2510 hdev->nic.client = NULL; 2511 return ret; 2512 clear_roce: 2513 hdev->roce_client = NULL; 2514 hdev->roce.client = NULL; 2515 return ret; 2516 } 2517 2518 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2519 struct hnae3_ae_dev *ae_dev) 2520 { 2521 struct hclgevf_dev *hdev = ae_dev->priv; 2522 2523 /* un-init roce, if it exists */ 2524 if (hdev->roce_client) { 2525 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2526 msleep(HCLGEVF_WAIT_RESET_DONE); 2527 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2528 2529 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2530 hdev->roce_client = NULL; 2531 hdev->roce.client = NULL; 2532 } 2533 2534 /* un-init nic/unic, if this was not called by roce client */ 2535 if (client->ops->uninit_instance && hdev->nic_client && 2536 client->type != HNAE3_CLIENT_ROCE) { 2537 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2538 msleep(HCLGEVF_WAIT_RESET_DONE); 2539 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2540 2541 client->ops->uninit_instance(&hdev->nic, 0); 2542 hdev->nic_client = NULL; 2543 hdev->nic.client = NULL; 2544 } 2545 } 2546 2547 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) 2548 { 2549 #define HCLGEVF_MEM_BAR 4 2550 2551 struct pci_dev *pdev = hdev->pdev; 2552 struct hclgevf_hw *hw = &hdev->hw; 2553 2554 /* for device does not have device memory, return directly */ 2555 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) 2556 return 0; 2557 2558 hw->hw.mem_base = 2559 devm_ioremap_wc(&pdev->dev, 2560 pci_resource_start(pdev, HCLGEVF_MEM_BAR), 2561 pci_resource_len(pdev, HCLGEVF_MEM_BAR)); 2562 if (!hw->hw.mem_base) { 2563 dev_err(&pdev->dev, "failed to map device memory\n"); 2564 return -EFAULT; 2565 } 2566 2567 return 0; 2568 } 2569 2570 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2571 { 2572 struct pci_dev *pdev = hdev->pdev; 2573 struct hclgevf_hw *hw; 2574 int ret; 2575 2576 ret = pci_enable_device(pdev); 2577 if (ret) { 2578 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2579 return ret; 2580 } 2581 2582 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2583 if (ret) { 2584 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2585 goto err_disable_device; 2586 } 2587 2588 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2589 if (ret) { 2590 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2591 goto err_disable_device; 2592 } 2593 2594 pci_set_master(pdev); 2595 hw = &hdev->hw; 2596 hw->hw.io_base = pci_iomap(pdev, 2, 0); 2597 if (!hw->hw.io_base) { 2598 dev_err(&pdev->dev, "can't map configuration register space\n"); 2599 ret = -ENOMEM; 2600 goto err_clr_master; 2601 } 2602 2603 ret = hclgevf_dev_mem_map(hdev); 2604 if (ret) 2605 goto err_unmap_io_base; 2606 2607 return 0; 2608 2609 err_unmap_io_base: 2610 pci_iounmap(pdev, hdev->hw.hw.io_base); 2611 err_clr_master: 2612 pci_clear_master(pdev); 2613 pci_release_regions(pdev); 2614 err_disable_device: 2615 pci_disable_device(pdev); 2616 2617 return ret; 2618 } 2619 2620 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2621 { 2622 struct pci_dev *pdev = hdev->pdev; 2623 2624 if (hdev->hw.hw.mem_base) 2625 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); 2626 2627 pci_iounmap(pdev, hdev->hw.hw.io_base); 2628 pci_clear_master(pdev); 2629 pci_release_regions(pdev); 2630 pci_disable_device(pdev); 2631 } 2632 2633 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2634 { 2635 struct hclgevf_query_res_cmd *req; 2636 struct hclge_desc desc; 2637 int ret; 2638 2639 hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true); 2640 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2641 if (ret) { 2642 dev_err(&hdev->pdev->dev, 2643 "query vf resource failed, ret = %d.\n", ret); 2644 return ret; 2645 } 2646 2647 req = (struct hclgevf_query_res_cmd *)desc.data; 2648 2649 if (hnae3_dev_roce_supported(hdev)) { 2650 hdev->roce_base_msix_offset = 2651 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 2652 HCLGEVF_MSIX_OFT_ROCEE_M, 2653 HCLGEVF_MSIX_OFT_ROCEE_S); 2654 hdev->num_roce_msix = 2655 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 2656 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2657 2658 /* nic's msix numbers is always equals to the roce's. */ 2659 hdev->num_nic_msix = hdev->num_roce_msix; 2660 2661 /* VF should have NIC vectors and Roce vectors, NIC vectors 2662 * are queued before Roce vectors. The offset is fixed to 64. 2663 */ 2664 hdev->num_msi = hdev->num_roce_msix + 2665 hdev->roce_base_msix_offset; 2666 } else { 2667 hdev->num_msi = 2668 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 2669 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2670 2671 hdev->num_nic_msix = hdev->num_msi; 2672 } 2673 2674 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 2675 dev_err(&hdev->pdev->dev, 2676 "Just %u msi resources, not enough for vf(min:2).\n", 2677 hdev->num_nic_msix); 2678 return -EINVAL; 2679 } 2680 2681 return 0; 2682 } 2683 2684 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 2685 { 2686 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 2687 2688 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2689 2690 ae_dev->dev_specs.max_non_tso_bd_num = 2691 HCLGEVF_MAX_NON_TSO_BD_NUM; 2692 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 2693 ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; 2694 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 2695 ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; 2696 } 2697 2698 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 2699 struct hclge_desc *desc) 2700 { 2701 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2702 struct hclgevf_dev_specs_0_cmd *req0; 2703 struct hclgevf_dev_specs_1_cmd *req1; 2704 2705 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 2706 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 2707 2708 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 2709 ae_dev->dev_specs.rss_ind_tbl_size = 2710 le16_to_cpu(req0->rss_ind_tbl_size); 2711 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 2712 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 2713 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 2714 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 2715 } 2716 2717 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 2718 { 2719 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 2720 2721 if (!dev_specs->max_non_tso_bd_num) 2722 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 2723 if (!dev_specs->rss_ind_tbl_size) 2724 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 2725 if (!dev_specs->rss_key_size) 2726 dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; 2727 if (!dev_specs->max_int_gl) 2728 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 2729 if (!dev_specs->max_frm_size) 2730 dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; 2731 } 2732 2733 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 2734 { 2735 struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 2736 int ret; 2737 int i; 2738 2739 /* set default specifications as devices lower than version V3 do not 2740 * support querying specifications from firmware. 2741 */ 2742 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 2743 hclgevf_set_default_dev_specs(hdev); 2744 return 0; 2745 } 2746 2747 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 2748 hclgevf_cmd_setup_basic_desc(&desc[i], 2749 HCLGE_OPC_QUERY_DEV_SPECS, true); 2750 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2751 } 2752 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); 2753 2754 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 2755 if (ret) 2756 return ret; 2757 2758 hclgevf_parse_dev_specs(hdev, desc); 2759 hclgevf_check_dev_specs(hdev); 2760 2761 return 0; 2762 } 2763 2764 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2765 { 2766 struct pci_dev *pdev = hdev->pdev; 2767 int ret = 0; 2768 2769 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2770 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2771 hclgevf_misc_irq_uninit(hdev); 2772 hclgevf_uninit_msi(hdev); 2773 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2774 } 2775 2776 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2777 pci_set_master(pdev); 2778 ret = hclgevf_init_msi(hdev); 2779 if (ret) { 2780 dev_err(&pdev->dev, 2781 "failed(%d) to init MSI/MSI-X\n", ret); 2782 return ret; 2783 } 2784 2785 ret = hclgevf_misc_irq_init(hdev); 2786 if (ret) { 2787 hclgevf_uninit_msi(hdev); 2788 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2789 ret); 2790 return ret; 2791 } 2792 2793 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2794 } 2795 2796 return ret; 2797 } 2798 2799 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 2800 { 2801 struct hclge_vf_to_pf_msg send_msg; 2802 2803 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 2804 HCLGE_MBX_VPORT_LIST_CLEAR); 2805 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2806 } 2807 2808 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) 2809 { 2810 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 2811 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); 2812 } 2813 2814 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) 2815 { 2816 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 2817 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); 2818 } 2819 2820 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2821 { 2822 struct pci_dev *pdev = hdev->pdev; 2823 int ret; 2824 2825 ret = hclgevf_pci_reset(hdev); 2826 if (ret) { 2827 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2828 return ret; 2829 } 2830 2831 hclgevf_arq_init(hdev); 2832 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 2833 &hdev->fw_version, false, 2834 hdev->reset_pending); 2835 if (ret) { 2836 dev_err(&pdev->dev, "cmd failed %d\n", ret); 2837 return ret; 2838 } 2839 2840 ret = hclgevf_rss_init_hw(hdev); 2841 if (ret) { 2842 dev_err(&hdev->pdev->dev, 2843 "failed(%d) to initialize RSS\n", ret); 2844 return ret; 2845 } 2846 2847 ret = hclgevf_config_gro(hdev); 2848 if (ret) 2849 return ret; 2850 2851 ret = hclgevf_init_vlan_config(hdev); 2852 if (ret) { 2853 dev_err(&hdev->pdev->dev, 2854 "failed(%d) to initialize VLAN config\n", ret); 2855 return ret; 2856 } 2857 2858 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 2859 2860 hclgevf_init_rxd_adv_layout(hdev); 2861 2862 dev_info(&hdev->pdev->dev, "Reset done\n"); 2863 2864 return 0; 2865 } 2866 2867 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 2868 { 2869 struct pci_dev *pdev = hdev->pdev; 2870 int ret; 2871 2872 ret = hclgevf_pci_init(hdev); 2873 if (ret) 2874 return ret; 2875 2876 ret = hclgevf_devlink_init(hdev); 2877 if (ret) 2878 goto err_devlink_init; 2879 2880 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); 2881 if (ret) 2882 goto err_cmd_queue_init; 2883 2884 hclgevf_arq_init(hdev); 2885 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 2886 &hdev->fw_version, false, 2887 hdev->reset_pending); 2888 if (ret) 2889 goto err_cmd_init; 2890 2891 /* Get vf resource */ 2892 ret = hclgevf_query_vf_resource(hdev); 2893 if (ret) 2894 goto err_cmd_init; 2895 2896 ret = hclgevf_query_dev_specs(hdev); 2897 if (ret) { 2898 dev_err(&pdev->dev, 2899 "failed to query dev specifications, ret = %d\n", ret); 2900 goto err_cmd_init; 2901 } 2902 2903 ret = hclgevf_init_msi(hdev); 2904 if (ret) { 2905 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 2906 goto err_cmd_init; 2907 } 2908 2909 hclgevf_state_init(hdev); 2910 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2911 hdev->reset_type = HNAE3_NONE_RESET; 2912 2913 ret = hclgevf_misc_irq_init(hdev); 2914 if (ret) 2915 goto err_misc_irq_init; 2916 2917 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2918 2919 ret = hclgevf_configure(hdev); 2920 if (ret) { 2921 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2922 goto err_config; 2923 } 2924 2925 ret = hclgevf_alloc_tqps(hdev); 2926 if (ret) { 2927 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2928 goto err_config; 2929 } 2930 2931 ret = hclgevf_set_handle_info(hdev); 2932 if (ret) 2933 goto err_config; 2934 2935 ret = hclgevf_config_gro(hdev); 2936 if (ret) 2937 goto err_config; 2938 2939 /* Initialize RSS for this VF */ 2940 ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev, 2941 &hdev->rss_cfg); 2942 if (ret) { 2943 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 2944 goto err_config; 2945 } 2946 2947 ret = hclgevf_rss_init_hw(hdev); 2948 if (ret) { 2949 dev_err(&hdev->pdev->dev, 2950 "failed(%d) to initialize RSS\n", ret); 2951 goto err_config; 2952 } 2953 2954 /* ensure vf tbl list as empty before init*/ 2955 ret = hclgevf_clear_vport_list(hdev); 2956 if (ret) { 2957 dev_err(&pdev->dev, 2958 "failed to clear tbl list configuration, ret = %d.\n", 2959 ret); 2960 goto err_config; 2961 } 2962 2963 ret = hclgevf_init_vlan_config(hdev); 2964 if (ret) { 2965 dev_err(&hdev->pdev->dev, 2966 "failed(%d) to initialize VLAN config\n", ret); 2967 goto err_config; 2968 } 2969 2970 hclgevf_init_rxd_adv_layout(hdev); 2971 2972 set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); 2973 2974 hdev->last_reset_time = jiffies; 2975 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 2976 HCLGEVF_DRIVER_NAME); 2977 2978 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 2979 2980 return 0; 2981 2982 err_config: 2983 hclgevf_misc_irq_uninit(hdev); 2984 err_misc_irq_init: 2985 hclgevf_state_uninit(hdev); 2986 hclgevf_uninit_msi(hdev); 2987 err_cmd_init: 2988 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 2989 err_cmd_queue_init: 2990 hclgevf_devlink_uninit(hdev); 2991 err_devlink_init: 2992 hclgevf_pci_uninit(hdev); 2993 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2994 return ret; 2995 } 2996 2997 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 2998 { 2999 struct hclge_vf_to_pf_msg send_msg; 3000 3001 hclgevf_state_uninit(hdev); 3002 hclgevf_uninit_rxd_adv_layout(hdev); 3003 3004 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3005 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3006 3007 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3008 hclgevf_misc_irq_uninit(hdev); 3009 hclgevf_uninit_msi(hdev); 3010 } 3011 3012 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 3013 hclgevf_devlink_uninit(hdev); 3014 hclgevf_pci_uninit(hdev); 3015 hclgevf_uninit_mac_list(hdev); 3016 } 3017 3018 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 3019 { 3020 struct pci_dev *pdev = ae_dev->pdev; 3021 int ret; 3022 3023 ret = hclgevf_alloc_hdev(ae_dev); 3024 if (ret) { 3025 dev_err(&pdev->dev, "hclge device allocation failed\n"); 3026 return ret; 3027 } 3028 3029 ret = hclgevf_init_hdev(ae_dev->priv); 3030 if (ret) { 3031 dev_err(&pdev->dev, "hclge device initialization failed\n"); 3032 return ret; 3033 } 3034 3035 return 0; 3036 } 3037 3038 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3039 { 3040 struct hclgevf_dev *hdev = ae_dev->priv; 3041 3042 hclgevf_uninit_hdev(hdev); 3043 ae_dev->priv = NULL; 3044 } 3045 3046 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3047 { 3048 struct hnae3_handle *nic = &hdev->nic; 3049 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3050 3051 return min_t(u32, hdev->rss_size_max, 3052 hdev->num_tqps / kinfo->tc_info.num_tc); 3053 } 3054 3055 /** 3056 * hclgevf_get_channels - Get the current channels enabled and max supported. 3057 * @handle: hardware information for network interface 3058 * @ch: ethtool channels structure 3059 * 3060 * We don't support separate tx and rx queues as channels. The other count 3061 * represents how many queues are being used for control. max_combined counts 3062 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3063 * q_vectors since we support a lot more queue pairs than q_vectors. 3064 **/ 3065 static void hclgevf_get_channels(struct hnae3_handle *handle, 3066 struct ethtool_channels *ch) 3067 { 3068 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3069 3070 ch->max_combined = hclgevf_get_max_channels(hdev); 3071 ch->other_count = 0; 3072 ch->max_other = 0; 3073 ch->combined_count = handle->kinfo.rss_size; 3074 } 3075 3076 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3077 u16 *alloc_tqps, u16 *max_rss_size) 3078 { 3079 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3080 3081 *alloc_tqps = hdev->num_tqps; 3082 *max_rss_size = hdev->rss_size_max; 3083 } 3084 3085 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3086 u32 new_tqps_num) 3087 { 3088 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3089 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3090 u16 max_rss_size; 3091 3092 kinfo->req_rss_size = new_tqps_num; 3093 3094 max_rss_size = min_t(u16, hdev->rss_size_max, 3095 hdev->num_tqps / kinfo->tc_info.num_tc); 3096 3097 /* Use the user's configuration when it is not larger than 3098 * max_rss_size, otherwise, use the maximum specification value. 3099 */ 3100 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 3101 kinfo->req_rss_size <= max_rss_size) 3102 kinfo->rss_size = kinfo->req_rss_size; 3103 else if (kinfo->rss_size > max_rss_size || 3104 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3105 kinfo->rss_size = max_rss_size; 3106 3107 kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 3108 } 3109 3110 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 3111 bool rxfh_configured) 3112 { 3113 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3114 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3115 u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; 3116 u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; 3117 u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; 3118 u16 cur_rss_size = kinfo->rss_size; 3119 u16 cur_tqps = kinfo->num_tqps; 3120 u32 *rss_indir; 3121 unsigned int i; 3122 int ret; 3123 3124 hclgevf_update_rss_size(handle, new_tqps_num); 3125 3126 hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map, 3127 tc_offset, tc_valid, tc_size); 3128 ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, 3129 tc_valid, tc_size); 3130 if (ret) 3131 return ret; 3132 3133 /* RSS indirection table has been configured by user */ 3134 if (rxfh_configured) 3135 goto out; 3136 3137 /* Reinitializes the rss indirect table according to the new RSS size */ 3138 rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 3139 sizeof(u32), GFP_KERNEL); 3140 if (!rss_indir) 3141 return -ENOMEM; 3142 3143 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 3144 rss_indir[i] = i % kinfo->rss_size; 3145 3146 hdev->rss_cfg.rss_size = kinfo->rss_size; 3147 3148 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 3149 if (ret) 3150 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 3151 ret); 3152 3153 kfree(rss_indir); 3154 3155 out: 3156 if (!ret) 3157 dev_info(&hdev->pdev->dev, 3158 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 3159 cur_rss_size, kinfo->rss_size, 3160 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 3161 3162 return ret; 3163 } 3164 3165 static int hclgevf_get_status(struct hnae3_handle *handle) 3166 { 3167 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3168 3169 return hdev->hw.mac.link; 3170 } 3171 3172 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3173 u8 *auto_neg, u32 *speed, 3174 u8 *duplex) 3175 { 3176 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3177 3178 if (speed) 3179 *speed = hdev->hw.mac.speed; 3180 if (duplex) 3181 *duplex = hdev->hw.mac.duplex; 3182 if (auto_neg) 3183 *auto_neg = AUTONEG_DISABLE; 3184 } 3185 3186 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3187 u8 duplex) 3188 { 3189 hdev->hw.mac.speed = speed; 3190 hdev->hw.mac.duplex = duplex; 3191 } 3192 3193 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3194 { 3195 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3196 bool gro_en_old = hdev->gro_en; 3197 int ret; 3198 3199 hdev->gro_en = enable; 3200 ret = hclgevf_config_gro(hdev); 3201 if (ret) 3202 hdev->gro_en = gro_en_old; 3203 3204 return ret; 3205 } 3206 3207 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3208 u8 *module_type) 3209 { 3210 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3211 3212 if (media_type) 3213 *media_type = hdev->hw.mac.media_type; 3214 3215 if (module_type) 3216 *module_type = hdev->hw.mac.module_type; 3217 } 3218 3219 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3220 { 3221 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3222 3223 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3224 } 3225 3226 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3227 { 3228 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3229 3230 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3231 } 3232 3233 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3234 { 3235 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3236 3237 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3238 } 3239 3240 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3241 { 3242 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3243 3244 return hdev->rst_stats.hw_rst_done_cnt; 3245 } 3246 3247 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3248 unsigned long *supported, 3249 unsigned long *advertising) 3250 { 3251 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3252 3253 *supported = hdev->hw.mac.supported; 3254 *advertising = hdev->hw.mac.advertising; 3255 } 3256 3257 #define MAX_SEPARATE_NUM 4 3258 #define SEPARATOR_VALUE 0xFDFCFBFA 3259 #define REG_NUM_PER_LINE 4 3260 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 3261 3262 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 3263 { 3264 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 3265 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3266 3267 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 3268 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 3269 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 3270 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 3271 3272 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 3273 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 3274 } 3275 3276 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 3277 void *data) 3278 { 3279 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3280 int i, j, reg_um, separator_num; 3281 u32 *reg = data; 3282 3283 *version = hdev->fw_version; 3284 3285 /* fetching per-VF registers values from VF PCIe register space */ 3286 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 3287 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3288 for (i = 0; i < reg_um; i++) 3289 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 3290 for (i = 0; i < separator_num; i++) 3291 *reg++ = SEPARATOR_VALUE; 3292 3293 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 3294 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3295 for (i = 0; i < reg_um; i++) 3296 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 3297 for (i = 0; i < separator_num; i++) 3298 *reg++ = SEPARATOR_VALUE; 3299 3300 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 3301 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3302 for (j = 0; j < hdev->num_tqps; j++) { 3303 for (i = 0; i < reg_um; i++) 3304 *reg++ = hclgevf_read_dev(&hdev->hw, 3305 ring_reg_addr_list[i] + 3306 0x200 * j); 3307 for (i = 0; i < separator_num; i++) 3308 *reg++ = SEPARATOR_VALUE; 3309 } 3310 3311 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 3312 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3313 for (j = 0; j < hdev->num_msi_used - 1; j++) { 3314 for (i = 0; i < reg_um; i++) 3315 *reg++ = hclgevf_read_dev(&hdev->hw, 3316 tqp_intr_reg_addr_list[i] + 3317 4 * j); 3318 for (i = 0; i < separator_num; i++) 3319 *reg++ = SEPARATOR_VALUE; 3320 } 3321 } 3322 3323 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 3324 u8 *port_base_vlan_info, u8 data_size) 3325 { 3326 struct hnae3_handle *nic = &hdev->nic; 3327 struct hclge_vf_to_pf_msg send_msg; 3328 int ret; 3329 3330 rtnl_lock(); 3331 3332 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 3333 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { 3334 dev_warn(&hdev->pdev->dev, 3335 "is resetting when updating port based vlan info\n"); 3336 rtnl_unlock(); 3337 return; 3338 } 3339 3340 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3341 if (ret) { 3342 rtnl_unlock(); 3343 return; 3344 } 3345 3346 /* send msg to PF and wait update port based vlan info */ 3347 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3348 HCLGE_MBX_PORT_BASE_VLAN_CFG); 3349 memcpy(send_msg.data, port_base_vlan_info, data_size); 3350 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3351 if (!ret) { 3352 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3353 nic->port_base_vlan_state = state; 3354 else 3355 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3356 } 3357 3358 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 3359 rtnl_unlock(); 3360 } 3361 3362 static const struct hnae3_ae_ops hclgevf_ops = { 3363 .init_ae_dev = hclgevf_init_ae_dev, 3364 .uninit_ae_dev = hclgevf_uninit_ae_dev, 3365 .reset_prepare = hclgevf_reset_prepare_general, 3366 .reset_done = hclgevf_reset_done, 3367 .init_client_instance = hclgevf_init_client_instance, 3368 .uninit_client_instance = hclgevf_uninit_client_instance, 3369 .start = hclgevf_ae_start, 3370 .stop = hclgevf_ae_stop, 3371 .client_start = hclgevf_client_start, 3372 .client_stop = hclgevf_client_stop, 3373 .map_ring_to_vector = hclgevf_map_ring_to_vector, 3374 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3375 .get_vector = hclgevf_get_vector, 3376 .put_vector = hclgevf_put_vector, 3377 .reset_queue = hclgevf_reset_tqp, 3378 .get_mac_addr = hclgevf_get_mac_addr, 3379 .set_mac_addr = hclgevf_set_mac_addr, 3380 .add_uc_addr = hclgevf_add_uc_addr, 3381 .rm_uc_addr = hclgevf_rm_uc_addr, 3382 .add_mc_addr = hclgevf_add_mc_addr, 3383 .rm_mc_addr = hclgevf_rm_mc_addr, 3384 .get_stats = hclgevf_get_stats, 3385 .update_stats = hclgevf_update_stats, 3386 .get_strings = hclgevf_get_strings, 3387 .get_sset_count = hclgevf_get_sset_count, 3388 .get_rss_key_size = hclge_comm_get_rss_key_size, 3389 .get_rss = hclgevf_get_rss, 3390 .set_rss = hclgevf_set_rss, 3391 .get_rss_tuple = hclgevf_get_rss_tuple, 3392 .set_rss_tuple = hclgevf_set_rss_tuple, 3393 .get_tc_size = hclgevf_get_tc_size, 3394 .get_fw_version = hclgevf_get_fw_version, 3395 .set_vlan_filter = hclgevf_set_vlan_filter, 3396 .enable_vlan_filter = hclgevf_enable_vlan_filter, 3397 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3398 .reset_event = hclgevf_reset_event, 3399 .set_default_reset_request = hclgevf_set_def_reset_request, 3400 .set_channels = hclgevf_set_channels, 3401 .get_channels = hclgevf_get_channels, 3402 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3403 .get_regs_len = hclgevf_get_regs_len, 3404 .get_regs = hclgevf_get_regs, 3405 .get_status = hclgevf_get_status, 3406 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3407 .get_media_type = hclgevf_get_media_type, 3408 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 3409 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3410 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 3411 .set_gro_en = hclgevf_gro_en, 3412 .set_mtu = hclgevf_set_mtu, 3413 .get_global_queue_id = hclgevf_get_qid_global, 3414 .set_timer_task = hclgevf_set_timer_task, 3415 .get_link_mode = hclgevf_get_link_mode, 3416 .set_promisc_mode = hclgevf_set_promisc_mode, 3417 .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3418 .get_cmdq_stat = hclgevf_get_cmdq_stat, 3419 }; 3420 3421 static struct hnae3_ae_algo ae_algovf = { 3422 .ops = &hclgevf_ops, 3423 .pdev_id_table = ae_algovf_pci_tbl, 3424 }; 3425 3426 static int hclgevf_init(void) 3427 { 3428 pr_info("%s is initializing\n", HCLGEVF_NAME); 3429 3430 hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); 3431 if (!hclgevf_wq) { 3432 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3433 return -ENOMEM; 3434 } 3435 3436 hnae3_register_ae_algo(&ae_algovf); 3437 3438 return 0; 3439 } 3440 3441 static void hclgevf_exit(void) 3442 { 3443 hnae3_unregister_ae_algo(&ae_algovf); 3444 destroy_workqueue(hclgevf_wq); 3445 } 3446 module_init(hclgevf_init); 3447 module_exit(hclgevf_exit); 3448 3449 MODULE_LICENSE("GPL"); 3450 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3451 MODULE_DESCRIPTION("HCLGEVF Driver"); 3452 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3453