// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {
	HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
	HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
	HCLGE_COMM_NIC_CSQ_DEPTH_REG,
	HCLGE_COMM_NIC_CSQ_TAIL_REG,
	HCLGE_COMM_NIC_CSQ_HEAD_REG,
	HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
	HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
	HCLGE_COMM_NIC_CRQ_DEPTH_REG,
	HCLGE_COMM_NIC_CRQ_TAIL_REG,
	HCLGE_COMM_NIC_CRQ_HEAD_REG,
	HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
	HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
	HCLGE_COMM_CMDQ_INTR_EN_REG,
	HCLGE_COMM_CMDQ_INTR_GEN_REG
};

static const u32 common_reg_addr_list[] = {
	HCLGEVF_MISC_VECTOR_REG_BASE,
	HCLGEVF_RST_ING,
	HCLGEVF_GRO_EN_REG
};

static const u32 ring_reg_addr_list[] = {
	HCLGEVF_RING_RX_ADDR_L_REG,
	HCLGEVF_RING_RX_ADDR_H_REG,
	HCLGEVF_RING_RX_BD_NUM_REG,
	HCLGEVF_RING_RX_BD_LENGTH_REG,
	HCLGEVF_RING_RX_MERGE_EN_REG,
	HCLGEVF_RING_RX_TAIL_REG,
	HCLGEVF_RING_RX_HEAD_REG,
	HCLGEVF_RING_RX_FBD_NUM_REG,
	HCLGEVF_RING_RX_OFFSET_REG,
	HCLGEVF_RING_RX_FBD_OFFSET_REG,
	HCLGEVF_RING_RX_STASH_REG,
	HCLGEVF_RING_RX_BD_ERR_REG,
	HCLGEVF_RING_TX_ADDR_L_REG,
	HCLGEVF_RING_TX_ADDR_H_REG,
	HCLGEVF_RING_TX_BD_NUM_REG,
	HCLGEVF_RING_TX_PRIORITY_REG,
	HCLGEVF_RING_TX_TC_REG,
	HCLGEVF_RING_TX_MERGE_EN_REG,
	HCLGEVF_RING_TX_TAIL_REG,
	HCLGEVF_RING_TX_HEAD_REG,
	HCLGEVF_RING_TX_FBD_NUM_REG,
	HCLGEVF_RING_TX_OFFSET_REG,
	HCLGEVF_RING_TX_EBD_NUM_REG,
	HCLGEVF_RING_TX_EBD_OFFSET_REG,
	HCLGEVF_RING_TX_BD_ERR_REG,
	HCLGEVF_RING_EN_REG
};

static const u32 tqp_intr_reg_addr_list[] = {
	HCLGEVF_TQP_INTR_CTRL_REG,
	HCLGEVF_TQP_INTR_GL0_REG,
	HCLGEVF_TQP_INTR_GL1_REG,
	HCLGEVF_TQP_INTR_GL2_REG,
	HCLGEVF_TQP_INTR_RL_REG
};

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue; it sends the
 * prefilled descriptors on the queue and cleans the queue afterwards.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
	struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;

	spin_lock(&cmdq->crq.lock);
	/* initialize the pointers of the async rx queue of the mailbox */
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	spin_unlock(&cmdq->crq.lock);
}

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclge_comm_tqps_get_sset_count(handle);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclge_comm_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclge_comm_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}

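/* A typical VF-to-PF mailbox round trip, as used throughout this file
 * (an orientation sketch only, not additional driver logic; response
 * sizing follows the individual callers below):
 *
 *	struct hclge_vf_to_pf_msg send_msg;
 *	u8 resp[HCLGE_MBX_MAX_RESP_DATA_SIZE];
 *	int ret;
 *
 *	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
 *	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp, sizeof(resp));
 *
 * Passing 'false' for the response flag sends the message without
 * waiting for a PF reply, which the link status and link mode requests
 * below rely on.
 */
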
static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
	struct hclge_basic_info *basic_info;
	struct hclge_vf_to_pf_msg send_msg;
	unsigned long caps;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"failed to get basic info from pf, ret = %d", status);
		return status;
	}

	basic_info = (struct hclge_basic_info *)resp_msg;

	hdev->hw_tc_map = basic_info->hw_tc_map;
	hdev->mbx_api_version = basic_info->mbx_api_version;
	caps = basic_info->pf_caps;
	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}

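/* Layout of the HCLGE_MBX_GET_QINFO response parsed above: three u16
 * fields packed into resp_msg at the offsets defined in
 * hclgevf_get_queue_info() (host byte order is assumed by the
 * memcpy()s):
 *
 *	bytes 0-1: number of task queue pairs allocated to this VF
 *	bytes 2-3: maximum RSS size
 *	bytes 4-5: rx buffer length
 */
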
static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclge_comm_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

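/* Address math used by hclgevf_alloc_tqps() above, restated: the
 * register window for queue i is
 *
 *	i <  HCLGEVF_TQP_MAX_SIZE_DEV_V2:
 *		io_base + HCLGEVF_TQP_REG_OFFSET + i * HCLGEVF_TQP_REG_SIZE
 *	i >= HCLGEVF_TQP_MAX_SIZE_DEV_V2:
 *		io_base + HCLGEVF_TQP_REG_OFFSET + HCLGEVF_TQP_EXT_REG_OFFSET +
 *		(i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * HCLGEVF_TQP_REG_SIZE
 *
 * i.e. the extended region is a second contiguous window whose
 * indexing restarts at queue HCLGEVF_TQP_MAX_SIZE_DEV_V2.
 */
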
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;
	u8 num_tc = 0;

	kinfo = &nic->kinfo;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			num_tc++;

	num_tc = num_tc ? num_tc : 1;
	kinfo->tc_info.num_tc = num_tc;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
	new_tqps = kinfo->rss_size * num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after initializing the max rss_size and tqps, adjust the default
	 * tqp number and rss size according to the actual vector numbers
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
				kinfo->rss_size);

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
	if (link_state != hdev->hw.mac.link) {
		hdev->hw.mac.link = link_state;
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

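/* Worked example for the sizing in hclgevf_knic_setup() (illustrative
 * values only): with num_tqps = 16, rss_size_max = 16 and two TCs set
 * in hw_tc_map, rss_size = min(16, 16 / 2) = 8 and
 * num_tqps = min(8 * 2, 16) = 16; both may then shrink further if
 * fewer than num_tqps + 1 NIC MSI-X vectors are available.
 */
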
static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

/* for revision 0x20, the VF shares the same RSS config as the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
		  HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

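/* The PF hands back the RSS hash key in HCLGEVF_RSS_MBX_RESP_LEN (8)
 * byte chunks, selected by send_msg.data[0]. Assuming, for example, a
 * 40 byte HCLGE_COMM_RSS_KEY_SIZE, msg_num works out to
 * DIV_ROUND_UP(40, 8) = 5 requests; the final memcpy() above is
 * clamped so a key size that is not a multiple of the chunk length
 * cannot overrun rss_hash_key.
 */
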
static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGE_COMM_RSS_KEY_SIZE);
		}
	}

	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
				     hdev->ae_dev->dev_specs.rss_ind_tbl_size);

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
						  hfunc);
		if (ret)
			return ret;
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
					      rss_cfg->rss_indirection_tbl);
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
				       &hdev->rss_cfg, nfc);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 tuple_sets;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	nfc->data = 0;

	ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
				       &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
			hnae3_get_field(node->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

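/* Ring-chain mapping requests above are batched: parameters accumulate
 * in send_msg.param[] and are flushed to the PF whenever
 * HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM entries are filled or the end of
 * the chain is reached, so one long chain may be split across several
 * mailbox messages.
 */
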
static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
	struct hnae3_handle *handle,
	int vector,
	struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;
	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
					     &handle->priv_flags) ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool en_bc_pmc;

	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
				       u16 stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclge_desc desc;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}

static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it; just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address was never added, there is no need to delete it */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

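/* Transition table implemented by hclgevf_update_mac_node() above
 * ("-" means the node state is left unchanged):
 *
 *	current state	TO_ADD req	TO_DEL req	ACTIVE req
 *	TO_ADD		-		node freed	ACTIVE
 *	TO_DEL		ACTIVE		-		-
 *	ACTIVE		-		TO_DEL		-
 */
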
static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			hnae3_format_mac_addr(format_mac_addr,
					      mac_node->mac_addr);
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %s, state = %d, ret = %d\n",
				format_mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means we have received a TO_DEL request
		 * during the time window of sending the mac config request
		 * to the PF. If the mac_node state is ACTIVE, change it to
		 * TO_DEL; it will then be removed at the next pass. If the
		 * state is TO_ADD, the TO_ADD request failed, so just remove
		 * the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means we
			 * received a new TO_ADD request during the time window
			 * of sending the mac config request to the PF, so just
			 * change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if adding or deleting some mac addresses failed, move them back to
	 * the mac_list and retry at the next pass.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

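/* Overall unicast/multicast sync flow used above:
 *
 * 1. under mac_list_lock, move TO_DEL nodes to tmp_del_list and copy
 *    TO_ADD nodes to tmp_add_list;
 * 2. with the lock dropped, send the mailbox deletes first (to free
 *    MAC table space) and then the adds;
 * 3. retake the lock and fold both temporary lists back into the main
 *    list, resolving any add/del requests that raced in while the
 *    lock was released.
 */
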
static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vf_to_pf_msg send_msg;

	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		return -EOPNOTSUPP;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_ENABLE_VLAN_FILTER);
	send_msg.data[0] = enable ? 1 : 0;

	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting or reset has failed, the firmware is
	 * unable to handle the mailbox. Just record the vlan id, and remove
	 * it after the reset finishes.
	 */
	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing the hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to be consistent
	 * with the stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

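/* vlan_del_fail_bmap therefore acts as a retry queue: a failed or
 * deferred "kill" sets the vlan id's bit, and hclgevf_sync_vlan_filter()
 * (driven by the periodic service task) retries up to
 * HCLGEVF_MAX_SYNC_COUNT deletions per pass, clearing each bit only
 * once the PF has accepted the request.
 */
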
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u8 return_status = 0;
	int ret;
	u16 i;

	/* disable VF queues before sending the queue reset msg to PF */
	ret = hclgevf_tqp_enable(handle, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
			ret);
		return ret;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
				   sizeof(return_status));
	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
		return ret;

	for (i = 1; i < handle->kinfo.num_tqps; i++) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
		memcpy(send_msg.data, &i, sizeof(i));
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
				      enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	struct hnae3_handle *handle = &hdev->roce;
	int ret;

	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);
	return ret;
}

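/* Queue reset semantics in hclgevf_reset_tqp() above: the first
 * HCLGE_MBX_QUEUE_RESET message (implicitly for queue 0) may be acked
 * with HCLGEVF_RESET_ALL_QUEUE_DONE, meaning the PF reset every queue
 * in one shot; otherwise the VF falls back to resetting the remaining
 * queues one by one, presumably for PFs that only reset a single queue
 * per request.
 */
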
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case the reset assertion was made by the PF.
	 * Yes, this also means we might end up waiting a bit more even for
	 * a VF reset.
	 */
	msleep(5000);

	return 0;
}

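/* With HCLGEVF_RESET_WAIT_US = 20000 and HCLGEVF_RESET_WAIT_CNT = 2000,
 * HCLGEVF_RESET_WAIT_TIMEOUT_US works out to 40 seconds, i.e. the polls
 * above give the hardware up to 40s to clear the reset-in-progress bits
 * before the reset is declared failed.
 */
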
static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		struct hclge_vf_to_pf_msg send_msg;
		int ret;

		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to assert VF reset, ret = %d\n", ret);
			return ret;
		}
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
		 hdev->reset_type);

	return 0;
}

static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fails */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.rst_cnt++;

	/* perform reset of the stack & ae device for a client */
	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;
	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret &&
	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

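/* hclgevf_get_reset_level() consumes pending requests in strict
 * priority order: VF reset, then VF full reset, VF-PF function reset,
 * VF function reset and finally FLR; picking a higher level also
 * clears the lower-level bits it supersedes.
 */
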
static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
#define HCLGEVF_RESET_RETRY_WAIT_MS	500
#define HCLGEVF_RESET_RETRY_CNT		5

	struct hclgevf_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

	while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
		down(&hdev->reset_sem);
		set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
		hdev->reset_type = rst_type;
		ret = hclgevf_reset_prepare(hdev);
		if (!ret && !hdev->reset_pending)
			break;

		dev_err(&hdev->pdev->dev,
			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
			ret, hdev->reset_pending, retry_cnt);
		clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
		up(&hdev->reset_sem);
		msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
	}

	/* disable misc vector before reset done */
	hclgevf_enable_vector(&hdev->misc_vector, false);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		hdev->rst_stats.flr_rst_cnt++;
}

static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	hclgevf_enable_vector(&hdev->misc_vector, true);

	ret = hclgevf_reset_rebuild(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
			 ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}

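/* The three schedule helpers above all funnel into the same delayed
 * work (hdev->service_task on hclgevf_wq); hclgevf_service_task()
 * below then dispatches to the reset, mailbox and periodic handlers
 * in turn.
 */
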
static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if the hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * the VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending);
		if (hdev->reset_type != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of the below happens:
		 * 1. reset was initiated due to a watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       a VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. a problem in TX due to another lower layer (for
		 *       example the link layer not functioning properly).
		 * 2. a VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the PF's response to this reset request.
		 * The PF will ack cases 1b and 2, but we will get no
		 * intimation about 1a from the PF as the cmdq would be in an
		 * unreliable state, i.e. mailbox communication between PF
		 * and VF would be broken.
		 *
		 * If we never get into the pending state it means either:
		 * 1. PF is not receiving our request, which could be due to
		 *    an IMP reset, or
		 * 2. PF is screwed.
		 * We cannot do much for 2., but to check first we can try to
		 * reset our PCIe + stack and see if it alleviates the
		 * problem.
		 */
		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
			/* prepare for a full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		return;

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
		return;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

(time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
1911 delta = jiffies - hdev->last_serv_processed;
1912
1913 if (delta < round_jiffies_relative(HZ)) {
1914 delta = round_jiffies_relative(HZ) - delta;
1915 goto out;
1916 }
1917 }
1918
1919 hdev->serv_processed_cnt++;
1920 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
1921 hclgevf_keep_alive(hdev);
1922
1923 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
1924 hdev->last_serv_processed = jiffies;
1925 goto out;
1926 }
1927
1928 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
1929 hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
1930
1931 /* VF does not need to request link status when this bit is set, because
1932 * PF will push its link status to VFs when the link status changes.
1933 */
1934 if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
1935 hclgevf_request_link_info(hdev);
1936
1937 hclgevf_update_link_mode(hdev);
1938
1939 hclgevf_sync_vlan_filter(hdev);
1940
1941 hclgevf_sync_mac_table(hdev);
1942
1943 hclgevf_sync_promisc_mode(hdev);
1944
1945 hdev->last_serv_processed = jiffies;
1946
1947 out:
1948 hclgevf_task_schedule(hdev, delta);
1949 }
1950
1951 static void hclgevf_service_task(struct work_struct *work)
1952 {
1953 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
1954 service_task.work);
1955
1956 hclgevf_reset_service_task(hdev);
1957 hclgevf_mailbox_service_task(hdev);
1958 hclgevf_periodic_service_task(hdev);
1959
1960 /* Handle reset and mbx again in case the periodic task delays the
1961 * handling by calling hclgevf_task_schedule() in
1962 * hclgevf_periodic_service_task()
1963 */
1964 hclgevf_reset_service_task(hdev);
1965 hclgevf_mailbox_service_task(hdev);
1966 }
1967
1968 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
1969 {
1970 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
1971 }
1972
1973 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
1974 u32 *clearval)
1975 {
1976 u32 val, cmdq_stat_reg, rst_ing_reg;
1977
1978 /* fetch the events from their corresponding regs */
1979 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
1980 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
1981 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
1982 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
1983 dev_info(&hdev->pdev->dev,
1984 "receive reset interrupt 0x%x!\n", rst_ing_reg);
1985 set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
1986 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1987 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
1988 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
1989 hdev->rst_stats.vf_rst_cnt++;
1990 /* set up VF hardware reset status; the PF will clear
1991 * this status once its initialization is done.
1992 */
1993 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
1994 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
1995 val | HCLGEVF_VF_RST_ING_BIT);
1996 return HCLGEVF_VECTOR0_EVENT_RST;
1997 }
1998
1999 /* check for vector0 mailbox(=CMDQ RX) event source */
2000 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
2001 /* for revision 0x21, clearing the interrupt means writing bit 0
2002 * to the clear register, while writing bit 1 keeps the
2003 * old value.
2004 * for revision 0x20, the clear register is a read & write
2005 * register, so we should just write 0 to the bit we are
2006 * handling, and keep the other bits as in cmdq_stat_reg.
2007 */ 2008 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2009 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2010 else 2011 *clearval = cmdq_stat_reg & 2012 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2013 2014 return HCLGEVF_VECTOR0_EVENT_MBX; 2015 } 2016 2017 /* print other vector0 event source */ 2018 dev_info(&hdev->pdev->dev, 2019 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2020 cmdq_stat_reg); 2021 2022 return HCLGEVF_VECTOR0_EVENT_OTHER; 2023 } 2024 2025 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2026 { 2027 enum hclgevf_evt_cause event_cause; 2028 struct hclgevf_dev *hdev = data; 2029 u32 clearval; 2030 2031 hclgevf_enable_vector(&hdev->misc_vector, false); 2032 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2033 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2034 hclgevf_clear_event_cause(hdev, clearval); 2035 2036 switch (event_cause) { 2037 case HCLGEVF_VECTOR0_EVENT_RST: 2038 hclgevf_reset_task_schedule(hdev); 2039 break; 2040 case HCLGEVF_VECTOR0_EVENT_MBX: 2041 hclgevf_mbx_handler(hdev); 2042 break; 2043 default: 2044 break; 2045 } 2046 2047 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2048 hclgevf_enable_vector(&hdev->misc_vector, true); 2049 2050 return IRQ_HANDLED; 2051 } 2052 2053 static int hclgevf_configure(struct hclgevf_dev *hdev) 2054 { 2055 int ret; 2056 2057 hdev->gro_en = true; 2058 2059 ret = hclgevf_get_basic_info(hdev); 2060 if (ret) 2061 return ret; 2062 2063 /* get current port based vlan state from PF */ 2064 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2065 if (ret) 2066 return ret; 2067 2068 /* get queue configuration from PF */ 2069 ret = hclgevf_get_queue_info(hdev); 2070 if (ret) 2071 return ret; 2072 2073 /* get queue depth info from PF */ 2074 ret = hclgevf_get_queue_depth(hdev); 2075 if (ret) 2076 return ret; 2077 2078 return hclgevf_get_pf_media_type(hdev); 2079 } 2080 2081 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2082 { 2083 struct pci_dev *pdev = ae_dev->pdev; 2084 struct hclgevf_dev *hdev; 2085 2086 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2087 if (!hdev) 2088 return -ENOMEM; 2089 2090 hdev->pdev = pdev; 2091 hdev->ae_dev = ae_dev; 2092 ae_dev->priv = hdev; 2093 2094 return 0; 2095 } 2096 2097 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2098 { 2099 struct hnae3_handle *roce = &hdev->roce; 2100 struct hnae3_handle *nic = &hdev->nic; 2101 2102 roce->rinfo.num_vectors = hdev->num_roce_msix; 2103 2104 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2105 hdev->num_msi_left == 0) 2106 return -EINVAL; 2107 2108 roce->rinfo.base_vector = hdev->roce_base_msix_offset; 2109 2110 roce->rinfo.netdev = nic->kinfo.netdev; 2111 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; 2112 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; 2113 2114 roce->pdev = nic->pdev; 2115 roce->ae_algo = nic->ae_algo; 2116 roce->numa_node_mask = nic->numa_node_mask; 2117 2118 return 0; 2119 } 2120 2121 static int hclgevf_config_gro(struct hclgevf_dev *hdev) 2122 { 2123 struct hclgevf_cfg_gro_status_cmd *req; 2124 struct hclge_desc desc; 2125 int ret; 2126 2127 if (!hnae3_dev_gro_supported(hdev)) 2128 return 0; 2129 2130 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2131 false); 2132 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2133 2134 req->gro_en = hdev->gro_en ? 
1 : 0; 2135 2136 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2137 if (ret) 2138 dev_err(&hdev->pdev->dev, 2139 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2140 2141 return ret; 2142 } 2143 2144 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2145 { 2146 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 2147 u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; 2148 u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; 2149 u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; 2150 int ret; 2151 2152 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2153 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, 2154 rss_cfg->rss_algo, 2155 rss_cfg->rss_hash_key); 2156 if (ret) 2157 return ret; 2158 2159 ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw, 2160 false, rss_cfg); 2161 if (ret) 2162 return ret; 2163 } 2164 2165 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 2166 rss_cfg->rss_indirection_tbl); 2167 if (ret) 2168 return ret; 2169 2170 hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map, 2171 tc_offset, tc_valid, tc_size); 2172 2173 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, 2174 tc_valid, tc_size); 2175 } 2176 2177 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2178 { 2179 struct hnae3_handle *nic = &hdev->nic; 2180 int ret; 2181 2182 ret = hclgevf_en_hw_strip_rxvtag(nic, true); 2183 if (ret) { 2184 dev_err(&hdev->pdev->dev, 2185 "failed to enable rx vlan offload, ret = %d\n", ret); 2186 return ret; 2187 } 2188 2189 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2190 false); 2191 } 2192 2193 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2194 { 2195 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2196 2197 unsigned long last = hdev->serv_processed_cnt; 2198 int i = 0; 2199 2200 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2201 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2202 last == hdev->serv_processed_cnt) 2203 usleep_range(1, 1); 2204 } 2205 2206 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2207 { 2208 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2209 2210 if (enable) { 2211 hclgevf_task_schedule(hdev, 0); 2212 } else { 2213 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2214 2215 /* flush memory to make sure DOWN is seen by service task */ 2216 smp_mb__before_atomic(); 2217 hclgevf_flush_link_update(hdev); 2218 } 2219 } 2220 2221 static int hclgevf_ae_start(struct hnae3_handle *handle) 2222 { 2223 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2224 2225 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2226 clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); 2227 2228 hclge_comm_reset_tqp_stats(handle); 2229 2230 hclgevf_request_link_info(hdev); 2231 2232 hclgevf_update_link_mode(hdev); 2233 2234 return 0; 2235 } 2236 2237 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2238 { 2239 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2240 2241 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2242 2243 if (hdev->reset_type != HNAE3_VF_RESET) 2244 hclgevf_reset_tqp(handle); 2245 2246 hclge_comm_reset_tqp_stats(handle); 2247 hclgevf_update_link_status(hdev, 0); 2248 } 2249 2250 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2251 { 2252 #define HCLGEVF_STATE_ALIVE 1 2253 #define HCLGEVF_STATE_NOT_ALIVE 0 2254 2255 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2256 struct hclge_vf_to_pf_msg send_msg; 2257 2258 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2259 send_msg.data[0] = alive ? 
HCLGEVF_STATE_ALIVE : 2260 HCLGEVF_STATE_NOT_ALIVE; 2261 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2262 } 2263 2264 static int hclgevf_client_start(struct hnae3_handle *handle) 2265 { 2266 return hclgevf_set_alive(handle, true); 2267 } 2268 2269 static void hclgevf_client_stop(struct hnae3_handle *handle) 2270 { 2271 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2272 int ret; 2273 2274 ret = hclgevf_set_alive(handle, false); 2275 if (ret) 2276 dev_warn(&hdev->pdev->dev, 2277 "%s failed %d\n", __func__, ret); 2278 } 2279 2280 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2281 { 2282 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2283 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2284 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2285 2286 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2287 2288 mutex_init(&hdev->mbx_resp.mbx_mutex); 2289 sema_init(&hdev->reset_sem, 1); 2290 2291 spin_lock_init(&hdev->mac_table.mac_list_lock); 2292 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2293 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2294 2295 /* bring the device down */ 2296 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2297 } 2298 2299 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2300 { 2301 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2302 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2303 2304 if (hdev->service_task.work.func) 2305 cancel_delayed_work_sync(&hdev->service_task); 2306 2307 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2308 } 2309 2310 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2311 { 2312 struct pci_dev *pdev = hdev->pdev; 2313 int vectors; 2314 int i; 2315 2316 if (hnae3_dev_roce_supported(hdev)) 2317 vectors = pci_alloc_irq_vectors(pdev, 2318 hdev->roce_base_msix_offset + 1, 2319 hdev->num_msi, 2320 PCI_IRQ_MSIX); 2321 else 2322 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2323 hdev->num_msi, 2324 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2325 2326 if (vectors < 0) { 2327 dev_err(&pdev->dev, 2328 "failed(%d) to allocate MSI/MSI-X vectors\n", 2329 vectors); 2330 return vectors; 2331 } 2332 if (vectors < hdev->num_msi) 2333 dev_warn(&hdev->pdev->dev, 2334 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2335 hdev->num_msi, vectors); 2336 2337 hdev->num_msi = vectors; 2338 hdev->num_msi_left = vectors; 2339 2340 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2341 sizeof(u16), GFP_KERNEL); 2342 if (!hdev->vector_status) { 2343 pci_free_irq_vectors(pdev); 2344 return -ENOMEM; 2345 } 2346 2347 for (i = 0; i < hdev->num_msi; i++) 2348 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2349 2350 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2351 sizeof(int), GFP_KERNEL); 2352 if (!hdev->vector_irq) { 2353 devm_kfree(&pdev->dev, hdev->vector_status); 2354 pci_free_irq_vectors(pdev); 2355 return -ENOMEM; 2356 } 2357 2358 return 0; 2359 } 2360 2361 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2362 { 2363 struct pci_dev *pdev = hdev->pdev; 2364 2365 devm_kfree(&pdev->dev, hdev->vector_status); 2366 devm_kfree(&pdev->dev, hdev->vector_irq); 2367 pci_free_irq_vectors(pdev); 2368 } 2369 2370 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2371 { 2372 int ret; 2373 2374 hclgevf_get_misc_vector(hdev); 2375 2376 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2377 HCLGEVF_NAME, pci_name(hdev->pdev)); 2378 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2379 0, hdev->misc_vector.name, hdev); 2380 if (ret) 
{ 2381 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2382 hdev->misc_vector.vector_irq); 2383 return ret; 2384 } 2385 2386 hclgevf_clear_event_cause(hdev, 0); 2387 2388 /* enable misc. vector(vector 0) */ 2389 hclgevf_enable_vector(&hdev->misc_vector, true); 2390 2391 return ret; 2392 } 2393 2394 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2395 { 2396 /* disable misc vector(vector 0) */ 2397 hclgevf_enable_vector(&hdev->misc_vector, false); 2398 synchronize_irq(hdev->misc_vector.vector_irq); 2399 free_irq(hdev->misc_vector.vector_irq, hdev); 2400 hclgevf_free_vector(hdev, 0); 2401 } 2402 2403 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2404 { 2405 struct device *dev = &hdev->pdev->dev; 2406 2407 dev_info(dev, "VF info begin:\n"); 2408 2409 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2410 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2411 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2412 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2413 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2414 dev_info(dev, "PF media type of this VF: %u\n", 2415 hdev->hw.mac.media_type); 2416 2417 dev_info(dev, "VF info end.\n"); 2418 } 2419 2420 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2421 struct hnae3_client *client) 2422 { 2423 struct hclgevf_dev *hdev = ae_dev->priv; 2424 int rst_cnt = hdev->rst_stats.rst_cnt; 2425 int ret; 2426 2427 ret = client->ops->init_instance(&hdev->nic); 2428 if (ret) 2429 return ret; 2430 2431 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2432 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2433 rst_cnt != hdev->rst_stats.rst_cnt) { 2434 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2435 2436 client->ops->uninit_instance(&hdev->nic, 0); 2437 return -EBUSY; 2438 } 2439 2440 hnae3_set_client_init_flag(client, ae_dev, 1); 2441 2442 if (netif_msg_drv(&hdev->nic)) 2443 hclgevf_info_show(hdev); 2444 2445 return 0; 2446 } 2447 2448 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2449 struct hnae3_client *client) 2450 { 2451 struct hclgevf_dev *hdev = ae_dev->priv; 2452 int ret; 2453 2454 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2455 !hdev->nic_client) 2456 return 0; 2457 2458 ret = hclgevf_init_roce_base_info(hdev); 2459 if (ret) 2460 return ret; 2461 2462 ret = client->ops->init_instance(&hdev->roce); 2463 if (ret) 2464 return ret; 2465 2466 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2467 hnae3_set_client_init_flag(client, ae_dev, 1); 2468 2469 return 0; 2470 } 2471 2472 static int hclgevf_init_client_instance(struct hnae3_client *client, 2473 struct hnae3_ae_dev *ae_dev) 2474 { 2475 struct hclgevf_dev *hdev = ae_dev->priv; 2476 int ret; 2477 2478 switch (client->type) { 2479 case HNAE3_CLIENT_KNIC: 2480 hdev->nic_client = client; 2481 hdev->nic.client = client; 2482 2483 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2484 if (ret) 2485 goto clear_nic; 2486 2487 ret = hclgevf_init_roce_client_instance(ae_dev, 2488 hdev->roce_client); 2489 if (ret) 2490 goto clear_roce; 2491 2492 break; 2493 case HNAE3_CLIENT_ROCE: 2494 if (hnae3_dev_roce_supported(hdev)) { 2495 hdev->roce_client = client; 2496 hdev->roce.client = client; 2497 } 2498 2499 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2500 if (ret) 2501 goto clear_roce; 2502 2503 break; 2504 default: 2505 return -EINVAL; 2506 } 2507 2508 return 0; 2509 2510 clear_nic: 2511 
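/* error unwinding: drop the client references taken above so that a
 * later rebind of the NIC or RoCE client starts from a clean state
 */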
hdev->nic_client = NULL;
2512 hdev->nic.client = NULL;
2513 return ret;
2514 clear_roce:
2515 hdev->roce_client = NULL;
2516 hdev->roce.client = NULL;
2517 return ret;
2518 }
2519
2520 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
2521 struct hnae3_ae_dev *ae_dev)
2522 {
2523 struct hclgevf_dev *hdev = ae_dev->priv;
2524
2525 /* un-init roce, if it exists */
2526 if (hdev->roce_client) {
2527 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2528 msleep(HCLGEVF_WAIT_RESET_DONE);
2529 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2530
2531 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
2532 hdev->roce_client = NULL;
2533 hdev->roce.client = NULL;
2534 }
2535
2536 /* un-init nic/unic, if this was not called by the roce client */
2537 if (client->ops->uninit_instance && hdev->nic_client &&
2538 client->type != HNAE3_CLIENT_ROCE) {
2539 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2540 msleep(HCLGEVF_WAIT_RESET_DONE);
2541 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2542
2543 client->ops->uninit_instance(&hdev->nic, 0);
2544 hdev->nic_client = NULL;
2545 hdev->nic.client = NULL;
2546 }
2547 }
2548
2549 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
2550 {
2551 #define HCLGEVF_MEM_BAR 4
2552
2553 struct pci_dev *pdev = hdev->pdev;
2554 struct hclgevf_hw *hw = &hdev->hw;
2555
2556 /* for devices that do not have device memory, return directly */
2557 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
2558 return 0;
2559
2560 hw->hw.mem_base =
2561 devm_ioremap_wc(&pdev->dev,
2562 pci_resource_start(pdev, HCLGEVF_MEM_BAR),
2563 pci_resource_len(pdev, HCLGEVF_MEM_BAR));
2564 if (!hw->hw.mem_base) {
2565 dev_err(&pdev->dev, "failed to map device memory\n");
2566 return -EFAULT;
2567 }
2568
2569 return 0;
2570 }
2571
2572 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
2573 {
2574 struct pci_dev *pdev = hdev->pdev;
2575 struct hclgevf_hw *hw;
2576 int ret;
2577
2578 ret = pci_enable_device(pdev);
2579 if (ret) {
2580 dev_err(&pdev->dev, "failed to enable PCI device\n");
2581 return ret;
2582 }
2583
2584 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2585 if (ret) {
2586 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
2587 goto err_disable_device;
2588 }
2589
2590 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
2591 if (ret) {
2592 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
2593 goto err_disable_device;
2594 }
2595
2596 pci_set_master(pdev);
2597 hw = &hdev->hw;
2598 hw->hw.io_base = pci_iomap(pdev, 2, 0);
2599 if (!hw->hw.io_base) {
2600 dev_err(&pdev->dev, "can't map configuration register space\n");
2601 ret = -ENOMEM;
2602 goto err_clr_master;
2603 }
2604
2605 ret = hclgevf_dev_mem_map(hdev);
2606 if (ret)
2607 goto err_unmap_io_base;
2608
2609 return 0;
2610
2611 err_unmap_io_base:
2612 pci_iounmap(pdev, hdev->hw.hw.io_base);
2613 err_clr_master:
2614 pci_clear_master(pdev);
2615 pci_release_regions(pdev);
2616 err_disable_device:
2617 pci_disable_device(pdev);
2618
2619 return ret;
2620 }
2621
2622 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
2623 {
2624 struct pci_dev *pdev = hdev->pdev;
2625
2626 if (hdev->hw.hw.mem_base)
2627 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
2628
2629 pci_iounmap(pdev, hdev->hw.hw.io_base);
2630 pci_clear_master(pdev);
2631 pci_release_regions(pdev);
2632 pci_disable_device(pdev);
2633 }
2634
2635 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
2636 {
2637 struct
hclgevf_query_res_cmd *req;
2638 struct hclge_desc desc;
2639 int ret;
2640
2641 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
2642 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2643 if (ret) {
2644 dev_err(&hdev->pdev->dev,
2645 "query vf resource failed, ret = %d.\n", ret);
2646 return ret;
2647 }
2648
2649 req = (struct hclgevf_query_res_cmd *)desc.data;
2650
2651 if (hnae3_dev_roce_supported(hdev)) {
2652 hdev->roce_base_msix_offset =
2653 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
2654 HCLGEVF_MSIX_OFT_ROCEE_M,
2655 HCLGEVF_MSIX_OFT_ROCEE_S);
2656 hdev->num_roce_msix =
2657 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2658 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2659
2660 /* the nic's msix number always equals the roce's. */
2661 hdev->num_nic_msix = hdev->num_roce_msix;
2662
2663 /* VF should have NIC vectors and RoCE vectors; NIC vectors
2664 * are queued before RoCE vectors. The offset is fixed to 64.
2665 */
2666 hdev->num_msi = hdev->num_roce_msix +
2667 hdev->roce_base_msix_offset;
2668 } else {
2669 hdev->num_msi =
2670 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2671 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2672
2673 hdev->num_nic_msix = hdev->num_msi;
2674 }
2675
2676 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
2677 dev_err(&hdev->pdev->dev,
2678 "Just %u msi resources, not enough for vf(min:2).\n",
2679 hdev->num_nic_msix);
2680 return -EINVAL;
2681 }
2682
2683 return 0;
2684 }
2685
2686 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
2687 {
2688 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U
2689
2690 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2691
2692 ae_dev->dev_specs.max_non_tso_bd_num =
2693 HCLGEVF_MAX_NON_TSO_BD_NUM;
2694 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
2695 ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2696 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2697 ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
2698 }
2699
2700 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
2701 struct hclge_desc *desc)
2702 {
2703 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2704 struct hclgevf_dev_specs_0_cmd *req0;
2705 struct hclgevf_dev_specs_1_cmd *req1;
2706
2707 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
2708 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
2709
2710 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
2711 ae_dev->dev_specs.rss_ind_tbl_size =
2712 le16_to_cpu(req0->rss_ind_tbl_size);
2713 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
2714 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
2715 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
2716 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
2717 }
2718
2719 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
2720 {
2721 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
2722
2723 if (!dev_specs->max_non_tso_bd_num)
2724 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
2725 if (!dev_specs->rss_ind_tbl_size)
2726 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
2727 if (!dev_specs->rss_key_size)
2728 dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2729 if (!dev_specs->max_int_gl)
2730 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2731 if (!dev_specs->max_frm_size)
2732 dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
2733 }
2734
2735 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
2736 { 2737 struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 2738 int ret; 2739 int i; 2740 2741 /* set default specifications as devices lower than version V3 do not 2742 * support querying specifications from firmware. 2743 */ 2744 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 2745 hclgevf_set_default_dev_specs(hdev); 2746 return 0; 2747 } 2748 2749 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 2750 hclgevf_cmd_setup_basic_desc(&desc[i], 2751 HCLGEVF_OPC_QUERY_DEV_SPECS, true); 2752 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2753 } 2754 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, 2755 true); 2756 2757 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 2758 if (ret) 2759 return ret; 2760 2761 hclgevf_parse_dev_specs(hdev, desc); 2762 hclgevf_check_dev_specs(hdev); 2763 2764 return 0; 2765 } 2766 2767 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2768 { 2769 struct pci_dev *pdev = hdev->pdev; 2770 int ret = 0; 2771 2772 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2773 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2774 hclgevf_misc_irq_uninit(hdev); 2775 hclgevf_uninit_msi(hdev); 2776 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2777 } 2778 2779 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2780 pci_set_master(pdev); 2781 ret = hclgevf_init_msi(hdev); 2782 if (ret) { 2783 dev_err(&pdev->dev, 2784 "failed(%d) to init MSI/MSI-X\n", ret); 2785 return ret; 2786 } 2787 2788 ret = hclgevf_misc_irq_init(hdev); 2789 if (ret) { 2790 hclgevf_uninit_msi(hdev); 2791 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2792 ret); 2793 return ret; 2794 } 2795 2796 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2797 } 2798 2799 return ret; 2800 } 2801 2802 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 2803 { 2804 struct hclge_vf_to_pf_msg send_msg; 2805 2806 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 2807 HCLGE_MBX_VPORT_LIST_CLEAR); 2808 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2809 } 2810 2811 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) 2812 { 2813 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 2814 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); 2815 } 2816 2817 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) 2818 { 2819 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 2820 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); 2821 } 2822 2823 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2824 { 2825 struct pci_dev *pdev = hdev->pdev; 2826 int ret; 2827 2828 ret = hclgevf_pci_reset(hdev); 2829 if (ret) { 2830 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2831 return ret; 2832 } 2833 2834 hclgevf_arq_init(hdev); 2835 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 2836 &hdev->fw_version, false, 2837 hdev->reset_pending); 2838 if (ret) { 2839 dev_err(&pdev->dev, "cmd failed %d\n", ret); 2840 return ret; 2841 } 2842 2843 ret = hclgevf_rss_init_hw(hdev); 2844 if (ret) { 2845 dev_err(&hdev->pdev->dev, 2846 "failed(%d) to initialize RSS\n", ret); 2847 return ret; 2848 } 2849 2850 ret = hclgevf_config_gro(hdev); 2851 if (ret) 2852 return ret; 2853 2854 ret = hclgevf_init_vlan_config(hdev); 2855 if (ret) { 2856 dev_err(&hdev->pdev->dev, 2857 "failed(%d) to initialize VLAN config\n", ret); 2858 return ret; 2859 } 2860 2861 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 2862 2863 hclgevf_init_rxd_adv_layout(hdev); 
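/* command queue, RSS, GRO and VLAN configuration have all been
 * restored above, so the hardware part of the reset is complete
 */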
2864
2865 dev_info(&hdev->pdev->dev, "Reset done\n");
2866
2867 return 0;
2868 }
2869
2870 static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
2871 {
2872 struct pci_dev *pdev = hdev->pdev;
2873 int ret;
2874
2875 ret = hclgevf_pci_init(hdev);
2876 if (ret)
2877 return ret;
2878
2879 ret = hclgevf_devlink_init(hdev);
2880 if (ret)
2881 goto err_devlink_init;
2882
2883 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
2884 if (ret)
2885 goto err_cmd_queue_init;
2886
2887 hclgevf_arq_init(hdev);
2888 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
2889 &hdev->fw_version, false,
2890 hdev->reset_pending);
2891 if (ret)
2892 goto err_cmd_init;
2893
2894 /* Get vf resource */
2895 ret = hclgevf_query_vf_resource(hdev);
2896 if (ret)
2897 goto err_cmd_init;
2898
2899 ret = hclgevf_query_dev_specs(hdev);
2900 if (ret) {
2901 dev_err(&pdev->dev,
2902 "failed to query dev specifications, ret = %d\n", ret);
2903 goto err_cmd_init;
2904 }
2905
2906 ret = hclgevf_init_msi(hdev);
2907 if (ret) {
2908 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
2909 goto err_cmd_init;
2910 }
2911
2912 hclgevf_state_init(hdev);
2913 hdev->reset_level = HNAE3_VF_FUNC_RESET;
2914 hdev->reset_type = HNAE3_NONE_RESET;
2915
2916 ret = hclgevf_misc_irq_init(hdev);
2917 if (ret)
2918 goto err_misc_irq_init;
2919
2920 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2921
2922 ret = hclgevf_configure(hdev);
2923 if (ret) {
2924 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
2925 goto err_config;
2926 }
2927
2928 ret = hclgevf_alloc_tqps(hdev);
2929 if (ret) {
2930 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
2931 goto err_config;
2932 }
2933
2934 ret = hclgevf_set_handle_info(hdev);
2935 if (ret)
2936 goto err_config;
2937
2938 ret = hclgevf_config_gro(hdev);
2939 if (ret)
2940 goto err_config;
2941
2942 /* Initialize RSS for this VF */
2943 ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
2944 &hdev->rss_cfg);
2945 if (ret) {
2946 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
2947 goto err_config;
2948 }
2949
2950 ret = hclgevf_rss_init_hw(hdev);
2951 if (ret) {
2952 dev_err(&hdev->pdev->dev,
2953 "failed(%d) to initialize RSS\n", ret);
2954 goto err_config;
2955 }
2956
2957 /* ensure the VF table list is empty before init */
2958 ret = hclgevf_clear_vport_list(hdev);
2959 if (ret) {
2960 dev_err(&pdev->dev,
2961 "failed to clear tbl list configuration, ret = %d.\n",
2962 ret);
2963 goto err_config;
2964 }
2965
2966 ret = hclgevf_init_vlan_config(hdev);
2967 if (ret) {
2968 dev_err(&hdev->pdev->dev,
2969 "failed(%d) to initialize VLAN config\n", ret);
2970 goto err_config;
2971 }
2972
2973 hclgevf_init_rxd_adv_layout(hdev);
2974
2975 set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
2976
2977 hdev->last_reset_time = jiffies;
2978 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
2979 HCLGEVF_DRIVER_NAME);
2980
2981 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
2982
2983 return 0;
2984
2985 err_config:
2986 hclgevf_misc_irq_uninit(hdev);
2987 err_misc_irq_init:
2988 hclgevf_state_uninit(hdev);
2989 hclgevf_uninit_msi(hdev);
2990 err_cmd_init:
2991 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
2992 err_cmd_queue_init:
2993 hclgevf_devlink_uninit(hdev);
2994 err_devlink_init:
2995 hclgevf_pci_uninit(hdev);
2996 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2997 return ret;
2998 }
2999
3000 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
3001 {
3002 struct hclge_vf_to_pf_msg send_msg;
3003
3004
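/* stop the service task and mark the device as being removed first,
 * so no new reset or mailbox work gets scheduled during teardown
 */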
hclgevf_state_uninit(hdev); 3005 hclgevf_uninit_rxd_adv_layout(hdev); 3006 3007 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3008 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3009 3010 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3011 hclgevf_misc_irq_uninit(hdev); 3012 hclgevf_uninit_msi(hdev); 3013 } 3014 3015 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 3016 hclgevf_devlink_uninit(hdev); 3017 hclgevf_pci_uninit(hdev); 3018 hclgevf_uninit_mac_list(hdev); 3019 } 3020 3021 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 3022 { 3023 struct pci_dev *pdev = ae_dev->pdev; 3024 int ret; 3025 3026 ret = hclgevf_alloc_hdev(ae_dev); 3027 if (ret) { 3028 dev_err(&pdev->dev, "hclge device allocation failed\n"); 3029 return ret; 3030 } 3031 3032 ret = hclgevf_init_hdev(ae_dev->priv); 3033 if (ret) { 3034 dev_err(&pdev->dev, "hclge device initialization failed\n"); 3035 return ret; 3036 } 3037 3038 return 0; 3039 } 3040 3041 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3042 { 3043 struct hclgevf_dev *hdev = ae_dev->priv; 3044 3045 hclgevf_uninit_hdev(hdev); 3046 ae_dev->priv = NULL; 3047 } 3048 3049 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3050 { 3051 struct hnae3_handle *nic = &hdev->nic; 3052 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3053 3054 return min_t(u32, hdev->rss_size_max, 3055 hdev->num_tqps / kinfo->tc_info.num_tc); 3056 } 3057 3058 /** 3059 * hclgevf_get_channels - Get the current channels enabled and max supported. 3060 * @handle: hardware information for network interface 3061 * @ch: ethtool channels structure 3062 * 3063 * We don't support separate tx and rx queues as channels. The other count 3064 * represents how many queues are being used for control. max_combined counts 3065 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3066 * q_vectors since we support a lot more queue pairs than q_vectors. 3067 **/ 3068 static void hclgevf_get_channels(struct hnae3_handle *handle, 3069 struct ethtool_channels *ch) 3070 { 3071 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3072 3073 ch->max_combined = hclgevf_get_max_channels(hdev); 3074 ch->other_count = 0; 3075 ch->max_other = 0; 3076 ch->combined_count = handle->kinfo.rss_size; 3077 } 3078 3079 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3080 u16 *alloc_tqps, u16 *max_rss_size) 3081 { 3082 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3083 3084 *alloc_tqps = hdev->num_tqps; 3085 *max_rss_size = hdev->rss_size_max; 3086 } 3087 3088 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3089 u32 new_tqps_num) 3090 { 3091 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3092 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3093 u16 max_rss_size; 3094 3095 kinfo->req_rss_size = new_tqps_num; 3096 3097 max_rss_size = min_t(u16, hdev->rss_size_max, 3098 hdev->num_tqps / kinfo->tc_info.num_tc); 3099 3100 /* Use the user's configuration when it is not larger than 3101 * max_rss_size, otherwise, use the maximum specification value. 
3102 */ 3103 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 3104 kinfo->req_rss_size <= max_rss_size) 3105 kinfo->rss_size = kinfo->req_rss_size; 3106 else if (kinfo->rss_size > max_rss_size || 3107 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3108 kinfo->rss_size = max_rss_size; 3109 3110 kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 3111 } 3112 3113 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 3114 bool rxfh_configured) 3115 { 3116 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3117 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3118 u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; 3119 u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; 3120 u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; 3121 u16 cur_rss_size = kinfo->rss_size; 3122 u16 cur_tqps = kinfo->num_tqps; 3123 u32 *rss_indir; 3124 unsigned int i; 3125 int ret; 3126 3127 hclgevf_update_rss_size(handle, new_tqps_num); 3128 3129 hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map, 3130 tc_offset, tc_valid, tc_size); 3131 ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, 3132 tc_valid, tc_size); 3133 if (ret) 3134 return ret; 3135 3136 /* RSS indirection table has been configured by user */ 3137 if (rxfh_configured) 3138 goto out; 3139 3140 /* Reinitializes the rss indirect table according to the new RSS size */ 3141 rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 3142 sizeof(u32), GFP_KERNEL); 3143 if (!rss_indir) 3144 return -ENOMEM; 3145 3146 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 3147 rss_indir[i] = i % kinfo->rss_size; 3148 3149 hdev->rss_cfg.rss_size = kinfo->rss_size; 3150 3151 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 3152 if (ret) 3153 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 3154 ret); 3155 3156 kfree(rss_indir); 3157 3158 out: 3159 if (!ret) 3160 dev_info(&hdev->pdev->dev, 3161 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 3162 cur_rss_size, kinfo->rss_size, 3163 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 3164 3165 return ret; 3166 } 3167 3168 static int hclgevf_get_status(struct hnae3_handle *handle) 3169 { 3170 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3171 3172 return hdev->hw.mac.link; 3173 } 3174 3175 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3176 u8 *auto_neg, u32 *speed, 3177 u8 *duplex) 3178 { 3179 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3180 3181 if (speed) 3182 *speed = hdev->hw.mac.speed; 3183 if (duplex) 3184 *duplex = hdev->hw.mac.duplex; 3185 if (auto_neg) 3186 *auto_neg = AUTONEG_DISABLE; 3187 } 3188 3189 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3190 u8 duplex) 3191 { 3192 hdev->hw.mac.speed = speed; 3193 hdev->hw.mac.duplex = duplex; 3194 } 3195 3196 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3197 { 3198 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3199 bool gro_en_old = hdev->gro_en; 3200 int ret; 3201 3202 hdev->gro_en = enable; 3203 ret = hclgevf_config_gro(hdev); 3204 if (ret) 3205 hdev->gro_en = gro_en_old; 3206 3207 return ret; 3208 } 3209 3210 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3211 u8 *module_type) 3212 { 3213 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3214 3215 if (media_type) 3216 *media_type = hdev->hw.mac.media_type; 3217 3218 if (module_type) 3219 *module_type = hdev->hw.mac.module_type; 3220 } 3221 3222 static bool 
hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
3223 {
3224 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3225
3226 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
3227 }
3228
3229 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
3230 {
3231 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3232
3233 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3234 }
3235
3236 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
3237 {
3238 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3239
3240 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
3241 }
3242
3243 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
3244 {
3245 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3246
3247 return hdev->rst_stats.hw_rst_done_cnt;
3248 }
3249
3250 static void hclgevf_get_link_mode(struct hnae3_handle *handle,
3251 unsigned long *supported,
3252 unsigned long *advertising)
3253 {
3254 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3255
3256 *supported = hdev->hw.mac.supported;
3257 *advertising = hdev->hw.mac.advertising;
3258 }
3259
3260 #define MAX_SEPARATE_NUM 4
3261 #define SEPARATOR_VALUE 0xFDFCFBFA
3262 #define REG_NUM_PER_LINE 4
3263 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
3264
3265 static int hclgevf_get_regs_len(struct hnae3_handle *handle)
3266 {
3267 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
3268 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3269
3270 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
3271 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
3272 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
3273 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
3274
3275 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
3276 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
3277 }
3278
3279 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
3280 void *data)
3281 {
3282 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3283 int i, j, reg_um, separator_num;
3284 u32 *reg = data;
3285
3286 *version = hdev->fw_version;
3287
3288 /* fetching per-VF register values from VF PCIe register space */
3289 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
3290 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3291 for (i = 0; i < reg_um; i++)
3292 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
3293 for (i = 0; i < separator_num; i++)
3294 *reg++ = SEPARATOR_VALUE;
3295
3296 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
3297 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3298 for (i = 0; i < reg_um; i++)
3299 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
3300 for (i = 0; i < separator_num; i++)
3301 *reg++ = SEPARATOR_VALUE;
3302
3303 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
3304 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3305 for (j = 0; j < hdev->num_tqps; j++) {
3306 for (i = 0; i < reg_um; i++)
3307 *reg++ = hclgevf_read_dev(&hdev->hw,
3308 ring_reg_addr_list[i] +
3309 0x200 * j);
3310 for (i = 0; i < separator_num; i++)
3311 *reg++ = SEPARATOR_VALUE;
3312 }
3313
3314 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
3315 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3316 for (j = 0; j < hdev->num_msi_used - 1; j++) {
3317 for (i = 0; i < reg_um; i++)
3318 *reg++ = hclgevf_read_dev(&hdev->hw,
3319 tqp_intr_reg_addr_list[i] +
3320 4 * j);
3321 for (i = 0; i < separator_num; i++)
3322 *reg++ = SEPARATOR_VALUE;
3323 }
3324 }
3325
3326 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
3327 u8 *port_base_vlan_info, u8 data_size)
3328 {
3329 struct hnae3_handle *nic = &hdev->nic;
3330 struct hclge_vf_to_pf_msg send_msg;
3331 int ret;
3332
3333 rtnl_lock();
3334
3335 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
3336 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
3337 dev_warn(&hdev->pdev->dev,
3338 "is resetting when updating port based vlan info\n");
3339 rtnl_unlock();
3340 return;
3341 }
3342
3343 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
3344 if (ret) {
3345 rtnl_unlock();
3346 return;
3347 }
3348
3349 /* send msg to PF and wait for it to update the port based vlan info */
3350 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
3351 HCLGE_MBX_PORT_BASE_VLAN_CFG);
3352 memcpy(send_msg.data, port_base_vlan_info, data_size);
3353 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3354 if (!ret) {
3355 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
3356 nic->port_base_vlan_state = state;
3357 else
3358 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
3359 }
3360
3361 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
3362 rtnl_unlock();
3363 }
3364
3365 static const struct hnae3_ae_ops hclgevf_ops = {
3366 .init_ae_dev = hclgevf_init_ae_dev,
3367 .uninit_ae_dev = hclgevf_uninit_ae_dev,
3368 .reset_prepare = hclgevf_reset_prepare_general,
3369 .reset_done = hclgevf_reset_done,
3370 .init_client_instance = hclgevf_init_client_instance,
3371 .uninit_client_instance = hclgevf_uninit_client_instance,
3372 .start = hclgevf_ae_start,
3373 .stop = hclgevf_ae_stop,
3374 .client_start = hclgevf_client_start,
3375 .client_stop = hclgevf_client_stop,
3376 .map_ring_to_vector = hclgevf_map_ring_to_vector,
3377 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
3378 .get_vector = hclgevf_get_vector,
3379 .put_vector = hclgevf_put_vector,
3380 .reset_queue = hclgevf_reset_tqp,
3381 .get_mac_addr = hclgevf_get_mac_addr,
3382 .set_mac_addr = hclgevf_set_mac_addr,
3383 .add_uc_addr = hclgevf_add_uc_addr,
3384 .rm_uc_addr = hclgevf_rm_uc_addr,
3385 .add_mc_addr = hclgevf_add_mc_addr,
3386 .rm_mc_addr = hclgevf_rm_mc_addr,
3387 .get_stats = hclgevf_get_stats,
3388 .update_stats = hclgevf_update_stats,
3389 .get_strings = hclgevf_get_strings,
3390 .get_sset_count = hclgevf_get_sset_count,
3391 .get_rss_key_size = hclge_comm_get_rss_key_size,
3392 .get_rss = hclgevf_get_rss,
3393 .set_rss = hclgevf_set_rss,
3394 .get_rss_tuple = hclgevf_get_rss_tuple,
3395 .set_rss_tuple = hclgevf_set_rss_tuple,
3396 .get_tc_size = hclgevf_get_tc_size,
3397 .get_fw_version = hclgevf_get_fw_version,
3398 .set_vlan_filter = hclgevf_set_vlan_filter,
3399 .enable_vlan_filter = hclgevf_enable_vlan_filter,
3400 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
3401 .reset_event = hclgevf_reset_event,
3402 .set_default_reset_request = hclgevf_set_def_reset_request,
3403 .set_channels = hclgevf_set_channels,
3404 .get_channels = hclgevf_get_channels,
3405 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
3406 .get_regs_len = hclgevf_get_regs_len,
3407 .get_regs = hclgevf_get_regs,
3408 .get_status = hclgevf_get_status,
3409 .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
3410 .get_media_type = hclgevf_get_media_type,
3411 .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
3412 .ae_dev_resetting = hclgevf_ae_dev_resetting,
3413 .ae_dev_reset_cnt =
hclgevf_ae_dev_reset_cnt, 3414 .set_gro_en = hclgevf_gro_en, 3415 .set_mtu = hclgevf_set_mtu, 3416 .get_global_queue_id = hclgevf_get_qid_global, 3417 .set_timer_task = hclgevf_set_timer_task, 3418 .get_link_mode = hclgevf_get_link_mode, 3419 .set_promisc_mode = hclgevf_set_promisc_mode, 3420 .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3421 .get_cmdq_stat = hclgevf_get_cmdq_stat, 3422 }; 3423 3424 static struct hnae3_ae_algo ae_algovf = { 3425 .ops = &hclgevf_ops, 3426 .pdev_id_table = ae_algovf_pci_tbl, 3427 }; 3428 3429 static int hclgevf_init(void) 3430 { 3431 pr_info("%s is initializing\n", HCLGEVF_NAME); 3432 3433 hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); 3434 if (!hclgevf_wq) { 3435 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3436 return -ENOMEM; 3437 } 3438 3439 hnae3_register_ae_algo(&ae_algovf); 3440 3441 return 0; 3442 } 3443 3444 static void hclgevf_exit(void) 3445 { 3446 hnae3_unregister_ae_algo(&ae_algovf); 3447 destroy_workqueue(hclgevf_wq); 3448 } 3449 module_init(hclgevf_init); 3450 module_exit(hclgevf_exit); 3451 3452 MODULE_LICENSE("GPL"); 3453 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3454 MODULE_DESCRIPTION("HCLGEVF Driver"); 3455 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3456