// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
					 HCLGE_COMM_CMDQ_INTR_EN_REG,
					 HCLGE_COMM_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it sends the
 * descriptors to the queue and cleans the queue afterwards.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num, false);
}

void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
	struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;
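	/* reset the ARQ book-keeping under the CRQ lock so it cannot race
	 * with the mailbox handling path that produces ARQ entries
	 */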
	spin_lock(&cmdq->crq.lock);
	/* initialize the pointers of the async rx queue of the mailbox */
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	spin_unlock(&cmdq->crq.lock);
}

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;
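	/* refresh the per-queue counters from firmware; on failure the
	 * previous software counters are kept and the error is only logged
	 */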
	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}

static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
	struct hclge_basic_info *basic_info;
	struct hclge_vf_to_pf_msg send_msg;
	unsigned long caps;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"failed to get basic info from pf, ret = %d", status);
		return status;
	}

	basic_info = (struct hclge_basic_info *)resp_msg;

	hdev->hw_tc_map = basic_info->hw_tc_map;
	hdev->mbx_api_version = basic_info->mbx_api_version;
	caps = basic_info->pf_caps;
	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
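/* byte offsets of the u16 TX and RX descriptor counts in the mailbox reply */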
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;
	u8 num_tc = 0;

	kinfo = &nic->kinfo;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			num_tc++;
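
	/* count at least one TC so the per-TC queue math below never
	 * divides by zero
	 */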
	num_tc = num_tc ? num_tc : 1;
	kinfo->tc_info.num_tc = num_tc;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
	new_tqps = kinfo->rss_size * num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after initializing the max rss_size and tqps, adjust the default
	 * tqp number and rss size to match the actual vector number
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
				kinfo->rss_size);

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
	if (link_state != hdev->hw.mac.link) {
		hdev->hw.mac.link = link_state;
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;
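
	/* MSI-X entry 0 is the misc/mailbox vector, so only num_nic_msix - 1
	 * entries are usable for rings; the search below therefore starts
	 * at HCLGEVF_MISC_VECTOR_NUM + 1
	 */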
	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclge_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int rss_cfg_tbl_num;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
			  HCLGEVF_RSS_CFG_TBL_SIZE;

	for (i = 0; i < rss_cfg_tbl_num; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index =
			cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclge_desc desc;
	u16 roundup_size;
	unsigned int i;
	int status;
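
	/* the device takes tc_size as log2 of the per-TC queue count; the
	 * roundup_pow_of_two() + ilog2() pair below computes that exponent
	 */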
	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
			      tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
			      0x1);
		hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
				   u8 *hash_algo)
{
	switch (hfunc) {
	case ETH_RSS_HASH_TOP:
		*hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
		return 0;
	case ETH_RSS_HASH_XOR:
		*hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		return 0;
	case ETH_RSS_HASH_NO_CHANGE:
		*hash_algo = hdev->rss_cfg.hash_algo;
		return 0;
	default:
		return -EINVAL;
	}
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 hash_algo;
	int ret, i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
		if (ret)
			return ret;

		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"invalid hfunc type %u\n", hfunc);
				return ret;
			}

			/* Update the shadow RSS key with the user specified
			 * key
			 */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		} else {
			ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
						       rss_cfg->rss_hash_key);
			if (ret)
				return ret;
		}
		rss_cfg->hash_algo = hash_algo;
	}

	/* update the shadow RSS table with the user specified qids */
	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
				      struct ethtool_rxnfc *nfc,
				      struct hclgevf_rss_input_tuple_cmd *req)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
			return -EINVAL;
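
		/* devices at or below V2 cannot hash SCTPv6 flows on L4
		 * ports, which is why such requests are rejected above
		 */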
		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init rss tuple cmd, ret = %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
					      int flow_type, u8 *tuple_sets)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		*tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
{
	u64 tuple_data = 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		tuple_data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		tuple_data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		tuple_data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		tuple_data |= RXH_IP_SRC;

	return tuple_data;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 tuple_sets;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	nfc->data = 0;

	ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
						 &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclgevf_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
			hnae3_get_field(node->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret = %d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;
	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
					     &handle->priv_flags) ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool en_bc_pmc;

	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
				       u16 stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclge_desc desc;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}

	return 0;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
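	/* report the MAC address assigned by the PF host when one exists;
	 * otherwise fall back to the VF's own MAC address
	 */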
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}

static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it; just check the mac addr state, convert it to a new
	 * state, or remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address was never added, there is nothing to delete */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			hnae3_format_mac_addr(format_mac_addr,
					      mac_node->mac_addr);
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %s, state = %d, ret = %d\n",
				format_mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, a TO_DEL request was received during the
		 * time window of sending the mac config request to the PF.
		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
		 * will be removed next time. If it is TO_ADD, the TO_ADD
		 * request failed, so just remove the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, a new
			 * TO_ADD request was received during the time window
			 * of sending the mac addr config request to the PF,
			 * so just change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if adding or deleting some mac addresses failed, move them back
	 * to the mac_list and retry next time
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vf_to_pf_msg send_msg;

	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		return -EOPNOTSUPP;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_ENABLE_VLAN_FILTER);
	send_msg.data[0] = enable ? 1 : 0;

	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting or reset has failed, firmware is
	 * unable to handle mailbox. Just record the vlan id, and remove it
	 * after the reset finishes.
	 */
	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing the hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to stay consistent
	 * with the stack
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u8 return_status = 0;
	int ret;
	u16 i;

	/* disable vf queues before sending the queue reset msg to PF */
	ret = hclgevf_tqp_enable(handle, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
			ret);
		return ret;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
				   sizeof(return_status));
	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
		return ret;

	for (i = 1; i < handle->kinfo.num_tqps; i++) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
		memcpy(send_msg.data, &i, sizeof(i));
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
				      enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	struct hnae3_handle *handle = &hdev->roce;
	int ret;

	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);
	return ret;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* wait a bit more for the stack reset to complete. This can be
	 * needed when the reset was asserted by the PF, and it also means
	 * we may end up waiting a bit longer even for a pure VF reset.
	 */
	msleep(5000);

	return 0;
}

static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		struct hclge_vf_to_pf_msg send_msg;
		int ret;

		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to assert VF reset, ret = %d\n", ret);
			return ret;
		}
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
		 hdev->reset_type);

	return 0;
}
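
/* log reset statistics and related hardware state to aid debugging */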
1990 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 1991 { 1992 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 1993 hdev->rst_stats.vf_func_rst_cnt); 1994 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1995 hdev->rst_stats.flr_rst_cnt); 1996 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 1997 hdev->rst_stats.vf_rst_cnt); 1998 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1999 hdev->rst_stats.rst_done_cnt); 2000 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 2001 hdev->rst_stats.hw_rst_done_cnt); 2002 dev_info(&hdev->pdev->dev, "reset count: %u\n", 2003 hdev->rst_stats.rst_cnt); 2004 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 2005 hdev->rst_stats.rst_fail_cnt); 2006 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 2007 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 2008 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 2009 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG)); 2010 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 2011 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG)); 2012 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 2013 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 2014 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 2015 } 2016 2017 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 2018 { 2019 /* recover the handshake status with IMP when reset fails */ 2020 hclgevf_reset_handshake(hdev, true); 2021 hdev->rst_stats.rst_fail_cnt++; 2022 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 2023 hdev->rst_stats.rst_fail_cnt); 2024 2025 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 2026 set_bit(hdev->reset_type, &hdev->reset_pending); 2027 2028 if (hclgevf_is_reset_pending(hdev)) { 2029 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2030 hclgevf_reset_task_schedule(hdev); 2031 } else { 2032 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2033 hclgevf_dump_rst_info(hdev); 2034 } 2035 } 2036 2037 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 2038 { 2039 int ret; 2040 2041 hdev->rst_stats.rst_cnt++; 2042 2043 /* perform reset of the stack & ae device for a client */ 2044 ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 2045 if (ret) 2046 return ret; 2047 2048 rtnl_lock(); 2049 /* bring down the nic to stop any ongoing TX/RX */ 2050 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 2051 rtnl_unlock(); 2052 if (ret) 2053 return ret; 2054 2055 return hclgevf_reset_prepare_wait(hdev); 2056 } 2057 2058 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 2059 { 2060 int ret; 2061 2062 hdev->rst_stats.hw_rst_done_cnt++; 2063 ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 2064 if (ret) 2065 return ret; 2066 2067 rtnl_lock(); 2068 /* now, re-initialize the nic client and ae device */ 2069 ret = hclgevf_reset_stack(hdev); 2070 rtnl_unlock(); 2071 if (ret) { 2072 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 2073 return ret; 2074 } 2075 2076 ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 2077 /* ignore the RoCE notify error once it has already failed 2078 * HCLGEVF_RESET_MAX_FAIL_CNT - 1 times 2079 */ 2080 if (ret && 2081 hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) 2082 return ret; 2083 2084 ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); 2085 if (ret) 2086 return ret; 2087 2088 hdev->last_reset_time = jiffies; 2089 hdev->rst_stats.rst_done_cnt++; 2090 hdev->rst_stats.rst_fail_cnt = 0; 2091 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2092 2093 return 0; 2094 } 2095
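/* Overview comment added for clarity: a full VF reset runs in three phases - hclgevf_reset_prepare() quiesces the RoCE and NIC clients and signals the PF/IMP, hclgevf_reset_wait() polls the hardware for reset completion, and hclgevf_reset_rebuild() re-initializes the stack and brings the clients back up. Any failure funnels into hclgevf_reset_err_handle(), which either re-pends the reset or marks HCLGEVF_STATE_RST_FAIL. */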
2096 static void hclgevf_reset(struct hclgevf_dev *hdev) 2097 { 2098 if (hclgevf_reset_prepare(hdev)) 2099 goto err_reset; 2100 2101 /* check if the VF could successfully fetch the hardware reset 2102 * completion status 2103 */ 2104 if (hclgevf_reset_wait(hdev)) { 2105 /* can't do much in this situation, will disable VF */ 2106 dev_err(&hdev->pdev->dev, 2107 "failed to fetch H/W reset completion status\n"); 2108 goto err_reset; 2109 } 2110 2111 if (hclgevf_reset_rebuild(hdev)) 2112 goto err_reset; 2113 2114 return; 2115 2116 err_reset: 2117 hclgevf_reset_err_handle(hdev); 2118 } 2119 2120 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 2121 unsigned long *addr) 2122 { 2123 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2124 2125 /* return the highest-priority reset level amongst all pending ones; a higher level also clears the lower ones it subsumes */ 2126 if (test_bit(HNAE3_VF_RESET, addr)) { 2127 rst_level = HNAE3_VF_RESET; 2128 clear_bit(HNAE3_VF_RESET, addr); 2129 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2130 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2131 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 2132 rst_level = HNAE3_VF_FULL_RESET; 2133 clear_bit(HNAE3_VF_FULL_RESET, addr); 2134 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2135 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 2136 rst_level = HNAE3_VF_PF_FUNC_RESET; 2137 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2138 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2139 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 2140 rst_level = HNAE3_VF_FUNC_RESET; 2141 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2142 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 2143 rst_level = HNAE3_FLR_RESET; 2144 clear_bit(HNAE3_FLR_RESET, addr); 2145 } 2146 2147 return rst_level; 2148 } 2149 2150 static void hclgevf_reset_event(struct pci_dev *pdev, 2151 struct hnae3_handle *handle) 2152 { 2153 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2154 struct hclgevf_dev *hdev = ae_dev->priv; 2155 2156 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 2157 2158 if (hdev->default_reset_request) 2159 hdev->reset_level = 2160 hclgevf_get_reset_level(hdev, 2161 &hdev->default_reset_request); 2162 else 2163 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2164 2165 /* reset of this VF requested */ 2166 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 2167 hclgevf_reset_task_schedule(hdev); 2168 2169 hdev->last_reset_time = jiffies; 2170 } 2171 2172 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 2173 enum hnae3_reset_type rst_type) 2174 { 2175 struct hclgevf_dev *hdev = ae_dev->priv; 2176 2177 set_bit(rst_type, &hdev->default_reset_request); 2178 } 2179
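/* The misc vector (vector 0) carries the reset and mailbox (CMDQ RX) events; writing 1 to its address unmasks it and writing 0 masks it. It is kept disabled across reset handling (see hclgevf_reset_prepare_general() and hclgevf_reset_done()) and while hclgevf_misc_irq_handle() runs. */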
2180 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 2181 { 2182 writel(en ? 1 : 0, vector->addr); 2183 } 2184 2185 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 2186 enum hnae3_reset_type rst_type) 2187 { 2188 #define HCLGEVF_RESET_RETRY_WAIT_MS 500 2189 #define HCLGEVF_RESET_RETRY_CNT 5 2190 2191 struct hclgevf_dev *hdev = ae_dev->priv; 2192 int retry_cnt = 0; 2193 int ret; 2194 2195 while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { 2196 down(&hdev->reset_sem); 2197 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2198 hdev->reset_type = rst_type; 2199 ret = hclgevf_reset_prepare(hdev); 2200 if (!ret && !hdev->reset_pending) 2201 break; 2202 2203 dev_err(&hdev->pdev->dev, 2204 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", 2205 ret, hdev->reset_pending, retry_cnt); 2206 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2207 up(&hdev->reset_sem); 2208 msleep(HCLGEVF_RESET_RETRY_WAIT_MS); 2209 } 2210 2211 /* disable misc vector until reset is done */ 2212 hclgevf_enable_vector(&hdev->misc_vector, false); 2213 2214 if (hdev->reset_type == HNAE3_FLR_RESET) 2215 hdev->rst_stats.flr_rst_cnt++; 2216 } 2217 2218 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) 2219 { 2220 struct hclgevf_dev *hdev = ae_dev->priv; 2221 int ret; 2222 2223 hclgevf_enable_vector(&hdev->misc_vector, true); 2224 2225 ret = hclgevf_reset_rebuild(hdev); 2226 if (ret) 2227 dev_warn(&hdev->pdev->dev, "failed to rebuild, ret=%d\n", 2228 ret); 2229 2230 hdev->reset_type = HNAE3_NONE_RESET; 2231 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2232 up(&hdev->reset_sem); 2233 } 2234 2235 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 2236 { 2237 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2238 2239 return hdev->fw_version; 2240 } 2241 2242 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 2243 { 2244 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 2245 2246 vector->vector_irq = pci_irq_vector(hdev->pdev, 2247 HCLGEVF_MISC_VECTOR_NUM); 2248 vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 2249 /* vector status is always valid for vector 0 */ 2250 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 2251 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 2252 2253 hdev->num_msi_left -= 1; 2254 hdev->num_msi_used += 1; 2255 } 2256 2257 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 2258 { 2259 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2260 test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && 2261 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 2262 &hdev->state)) 2263 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2264 } 2265 2266 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 2267 { 2268 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2269 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 2270 &hdev->state)) 2271 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2272 } 2273 2274 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 2275 unsigned long delay) 2276 { 2277 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2278 !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2279 mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); 2280 } 2281
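/* The service sub-tasks below share the single delayed work item hdev->service_task on hclgevf_wq: the schedule helpers above use test_and_set_bit() so that concurrent requests coalesce into one run, and mod_delayed_work(..., 0) pulls any pending periodic run forward instead of queueing a second one. */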
2282 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) 2283 { 2284 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 2285 2286 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) 2287 return; 2288 2289 down(&hdev->reset_sem); 2290 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2291 2292 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 2293 &hdev->reset_state)) { 2294 /* The PF has informed us that it is about to reset the hardware. 2295 * We now have to poll and check whether the hardware has actually 2296 * completed the reset sequence. On hardware reset completion, 2297 * the VF needs to reset the client and ae device. 2298 */ 2299 hdev->reset_attempts = 0; 2300 2301 hdev->last_reset_time = jiffies; 2302 hdev->reset_type = 2303 hclgevf_get_reset_level(hdev, &hdev->reset_pending); 2304 if (hdev->reset_type != HNAE3_NONE_RESET) 2305 hclgevf_reset(hdev); 2306 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 2307 &hdev->reset_state)) { 2308 /* we could be here when either of the below happens: 2309 * 1. reset was initiated due to a watchdog timeout caused by 2310 * a. the IMP was reset earlier and our TX got choked, 2311 * which made the watchdog react and induce a VF 2312 * reset. This also means our cmdq is unreliable. 2313 * b. a TX problem in some lower layer (for example, the 2314 * link layer not functioning properly). 2315 * 2. a VF reset might have been initiated due to some config 2316 * change. 2317 * 2318 * NOTE: there is no clearer way to detect the above cases than 2319 * to react to the PF's response to this reset request. The PF 2320 * will ack cases 1b and 2, but we will get no indication of 1a 2321 * from the PF, as the cmdq would be in an unreliable state, 2322 * i.e. the mailbox communication between PF and VF would be 2323 * broken. 2324 * 2325 * if we never get into the pending state, it means either: 2326 * 1. the PF is not receiving our request, which could be due 2327 * to an IMP reset, or 2328 * 2. the PF is broken. 2329 * We cannot do much for case 2, but we can first try resetting our PCIe + stack and see if that alleviates the problem. 2330 */ 2331 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 2332 /* prepare for a full reset of the stack + pcie interface */ 2333 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 2334 2335 /* "defer": schedule the reset task again */ 2336 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2337 } else { 2338 hdev->reset_attempts++; 2339 2340 set_bit(hdev->reset_level, &hdev->reset_pending); 2341 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2342 } 2343 hclgevf_reset_task_schedule(hdev); 2344 } 2345 2346 hdev->reset_type = HNAE3_NONE_RESET; 2347 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2348 up(&hdev->reset_sem); 2349 } 2350 2351 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 2352 { 2353 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2354 return; 2355 2356 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 2357 return; 2358 2359 hclgevf_mbx_async_handler(hdev); 2360 2361 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2362 } 2363 2364 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 2365 { 2366 struct hclge_vf_to_pf_msg send_msg; 2367 int ret; 2368 2369 if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) 2370 return; 2371 2372 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 2373 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2374 if (ret) 2375 dev_err(&hdev->pdev->dev, 2376 "VF failed to send keep alive cmd, ret = %d\n", ret); 2377 } 2378
2379 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 2380 { 2381 unsigned long delta = round_jiffies_relative(HZ); 2382 struct hnae3_handle *handle = &hdev->nic; 2383 2384 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2385 return; 2386 2387 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 2388 delta = jiffies - hdev->last_serv_processed; 2389 2390 if (delta < round_jiffies_relative(HZ)) { 2391 delta = round_jiffies_relative(HZ) - delta; 2392 goto out; 2393 } 2394 } 2395 2396 hdev->serv_processed_cnt++; 2397 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 2398 hclgevf_keep_alive(hdev); 2399 2400 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 2401 hdev->last_serv_processed = jiffies; 2402 goto out; 2403 } 2404 2405 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 2406 hclgevf_tqps_update_stats(handle); 2407 2408 /* the VF does not need to request the link status when this bit is 2409 * set, because the PF pushes its link status to the VFs whenever it 2410 * changes. 2411 */ if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) 2412 hclgevf_request_link_info(hdev); 2413 2414 hclgevf_update_link_mode(hdev); 2415 2416 hclgevf_sync_vlan_filter(hdev); 2417 2418 hclgevf_sync_mac_table(hdev); 2419 2420 hclgevf_sync_promisc_mode(hdev); 2421 2422 hdev->last_serv_processed = jiffies; 2423 2424 out: 2425 hclgevf_task_schedule(hdev, delta); 2426 } 2427 2428 static void hclgevf_service_task(struct work_struct *work) 2429 { 2430 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 2431 service_task.work); 2432 2433 hclgevf_reset_service_task(hdev); 2434 hclgevf_mailbox_service_task(hdev); 2435 hclgevf_periodic_service_task(hdev); 2436 2437 /* Handle reset and mbx again in case the periodic task delays their 2438 * handling by calling hclgevf_task_schedule() in 2439 * hclgevf_periodic_service_task() 2440 */ 2441 hclgevf_reset_service_task(hdev); 2442 hclgevf_mailbox_service_task(hdev); 2443 } 2444 2445 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 2446 { 2447 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr); 2448 } 2449 2450 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 2451 u32 *clearval) 2452 { 2453 u32 val, cmdq_stat_reg, rst_ing_reg; 2454 2455 /* fetch the events from their corresponding regs */ 2456 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 2457 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG); 2458 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 2459 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2460 dev_info(&hdev->pdev->dev, 2461 "receive reset interrupt 0x%x!\n", rst_ing_reg); 2462 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 2463 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2464 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 2465 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 2466 hdev->rst_stats.vf_rst_cnt++; 2467 /* set the VF hardware reset status; the PF will clear it 2468 * once its initialization is done. 2469 */ 2470 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 2471 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 2472 val | HCLGEVF_VF_RST_ING_BIT); 2473 return HCLGEVF_VECTOR0_EVENT_RST; 2474 } 2475 2476 /* check for vector0 mailbox(=CMDQ RX) event source */ 2477 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 2478 /* for revision 0x21, an interrupt bit is cleared by writing 0 2479 * to it in the clear register, while writing 1 keeps the 2480 * old value. 2481 * for revision 0x20, the clear register is a read & write 2482 * register, so we just write 0 to the bit we are 2483 * handling, and keep the other bits as in cmdq_stat_reg.
2484 */ 2485 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2486 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2487 else 2488 *clearval = cmdq_stat_reg & 2489 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2490 2491 return HCLGEVF_VECTOR0_EVENT_MBX; 2492 } 2493 2494 /* print other vector0 event source */ 2495 dev_info(&hdev->pdev->dev, 2496 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2497 cmdq_stat_reg); 2498 2499 return HCLGEVF_VECTOR0_EVENT_OTHER; 2500 } 2501 2502 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2503 { 2504 enum hclgevf_evt_cause event_cause; 2505 struct hclgevf_dev *hdev = data; 2506 u32 clearval; 2507 2508 hclgevf_enable_vector(&hdev->misc_vector, false); 2509 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2510 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2511 hclgevf_clear_event_cause(hdev, clearval); 2512 2513 switch (event_cause) { 2514 case HCLGEVF_VECTOR0_EVENT_RST: 2515 hclgevf_reset_task_schedule(hdev); 2516 break; 2517 case HCLGEVF_VECTOR0_EVENT_MBX: 2518 hclgevf_mbx_handler(hdev); 2519 break; 2520 default: 2521 break; 2522 } 2523 2524 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2525 hclgevf_enable_vector(&hdev->misc_vector, true); 2526 2527 return IRQ_HANDLED; 2528 } 2529 2530 static int hclgevf_configure(struct hclgevf_dev *hdev) 2531 { 2532 int ret; 2533 2534 hdev->gro_en = true; 2535 2536 ret = hclgevf_get_basic_info(hdev); 2537 if (ret) 2538 return ret; 2539 2540 /* get current port based vlan state from PF */ 2541 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2542 if (ret) 2543 return ret; 2544 2545 /* get queue configuration from PF */ 2546 ret = hclgevf_get_queue_info(hdev); 2547 if (ret) 2548 return ret; 2549 2550 /* get queue depth info from PF */ 2551 ret = hclgevf_get_queue_depth(hdev); 2552 if (ret) 2553 return ret; 2554 2555 return hclgevf_get_pf_media_type(hdev); 2556 } 2557 2558 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2559 { 2560 struct pci_dev *pdev = ae_dev->pdev; 2561 struct hclgevf_dev *hdev; 2562 2563 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2564 if (!hdev) 2565 return -ENOMEM; 2566 2567 hdev->pdev = pdev; 2568 hdev->ae_dev = ae_dev; 2569 ae_dev->priv = hdev; 2570 2571 return 0; 2572 } 2573 2574 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2575 { 2576 struct hnae3_handle *roce = &hdev->roce; 2577 struct hnae3_handle *nic = &hdev->nic; 2578 2579 roce->rinfo.num_vectors = hdev->num_roce_msix; 2580 2581 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2582 hdev->num_msi_left == 0) 2583 return -EINVAL; 2584 2585 roce->rinfo.base_vector = hdev->roce_base_msix_offset; 2586 2587 roce->rinfo.netdev = nic->kinfo.netdev; 2588 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; 2589 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; 2590 2591 roce->pdev = nic->pdev; 2592 roce->ae_algo = nic->ae_algo; 2593 roce->numa_node_mask = nic->numa_node_mask; 2594 2595 return 0; 2596 } 2597 2598 static int hclgevf_config_gro(struct hclgevf_dev *hdev) 2599 { 2600 struct hclgevf_cfg_gro_status_cmd *req; 2601 struct hclge_desc desc; 2602 int ret; 2603 2604 if (!hnae3_dev_gro_supported(hdev)) 2605 return 0; 2606 2607 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2608 false); 2609 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2610 2611 req->gro_en = hdev->gro_en ? 
1 : 0; 2612 2613 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2614 if (ret) 2615 dev_err(&hdev->pdev->dev, 2616 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2617 2618 return ret; 2619 } 2620 2621 static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) 2622 { 2623 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size; 2624 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2625 struct hclgevf_rss_tuple_cfg *tuple_sets; 2626 u32 i; 2627 2628 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 2629 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2630 tuple_sets = &rss_cfg->rss_tuple_sets; 2631 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2632 u8 *rss_ind_tbl; 2633 2634 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2635 2636 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size, 2637 sizeof(*rss_ind_tbl), GFP_KERNEL); 2638 if (!rss_ind_tbl) 2639 return -ENOMEM; 2640 2641 rss_cfg->rss_indirection_tbl = rss_ind_tbl; 2642 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2643 HCLGEVF_RSS_KEY_SIZE); 2644 2645 tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2646 tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2647 tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2648 tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2649 tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2650 tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2651 tuple_sets->ipv6_sctp_en = 2652 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 2653 HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT : 2654 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2655 tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2656 } 2657 2658 /* Initialize RSS indirect table */ 2659 for (i = 0; i < rss_ind_tbl_size; i++) 2660 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2661 2662 return 0; 2663 } 2664 2665 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2666 { 2667 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2668 int ret; 2669 2670 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2671 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 2672 rss_cfg->rss_hash_key); 2673 if (ret) 2674 return ret; 2675 2676 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2677 if (ret) 2678 return ret; 2679 } 2680 2681 ret = hclgevf_set_rss_indir_table(hdev); 2682 if (ret) 2683 return ret; 2684 2685 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2686 } 2687 2688 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2689 { 2690 struct hnae3_handle *nic = &hdev->nic; 2691 int ret; 2692 2693 ret = hclgevf_en_hw_strip_rxvtag(nic, true); 2694 if (ret) { 2695 dev_err(&hdev->pdev->dev, 2696 "failed to enable rx vlan offload, ret = %d\n", ret); 2697 return ret; 2698 } 2699 2700 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2701 false); 2702 } 2703 2704 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2705 { 2706 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2707 2708 unsigned long last = hdev->serv_processed_cnt; 2709 int i = 0; 2710 2711 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2712 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2713 last == hdev->serv_processed_cnt) 2714 usleep_range(1, 1); 2715 } 2716 2717 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2718 { 2719 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2720 2721 if (enable) { 2722 hclgevf_task_schedule(hdev, 0); 2723 } else { 2724 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2725 2726 /* flush 
memory to make sure DOWN is seen by service task */ 2727 smp_mb__before_atomic(); 2728 hclgevf_flush_link_update(hdev); 2729 } 2730 } 2731 2732 static int hclgevf_ae_start(struct hnae3_handle *handle) 2733 { 2734 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2735 2736 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2737 clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); 2738 2739 hclgevf_reset_tqp_stats(handle); 2740 2741 hclgevf_request_link_info(hdev); 2742 2743 hclgevf_update_link_mode(hdev); 2744 2745 return 0; 2746 } 2747 2748 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2749 { 2750 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2751 2752 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2753 2754 if (hdev->reset_type != HNAE3_VF_RESET) 2755 hclgevf_reset_tqp(handle); 2756 2757 hclgevf_reset_tqp_stats(handle); 2758 hclgevf_update_link_status(hdev, 0); 2759 } 2760 2761 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2762 { 2763 #define HCLGEVF_STATE_ALIVE 1 2764 #define HCLGEVF_STATE_NOT_ALIVE 0 2765 2766 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2767 struct hclge_vf_to_pf_msg send_msg; 2768 2769 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2770 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2771 HCLGEVF_STATE_NOT_ALIVE; 2772 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2773 } 2774 2775 static int hclgevf_client_start(struct hnae3_handle *handle) 2776 { 2777 return hclgevf_set_alive(handle, true); 2778 } 2779 2780 static void hclgevf_client_stop(struct hnae3_handle *handle) 2781 { 2782 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2783 int ret; 2784 2785 ret = hclgevf_set_alive(handle, false); 2786 if (ret) 2787 dev_warn(&hdev->pdev->dev, 2788 "%s failed %d\n", __func__, ret); 2789 } 2790 2791 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2792 { 2793 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2794 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2795 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2796 2797 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2798 2799 mutex_init(&hdev->mbx_resp.mbx_mutex); 2800 sema_init(&hdev->reset_sem, 1); 2801 2802 spin_lock_init(&hdev->mac_table.mac_list_lock); 2803 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2804 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2805 2806 /* bring the device down */ 2807 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2808 } 2809 2810 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2811 { 2812 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2813 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2814 2815 if (hdev->service_task.work.func) 2816 cancel_delayed_work_sync(&hdev->service_task); 2817 2818 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2819 } 2820 2821 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2822 { 2823 struct pci_dev *pdev = hdev->pdev; 2824 int vectors; 2825 int i; 2826 2827 if (hnae3_dev_roce_supported(hdev)) 2828 vectors = pci_alloc_irq_vectors(pdev, 2829 hdev->roce_base_msix_offset + 1, 2830 hdev->num_msi, 2831 PCI_IRQ_MSIX); 2832 else 2833 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2834 hdev->num_msi, 2835 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2836 2837 if (vectors < 0) { 2838 dev_err(&pdev->dev, 2839 "failed(%d) to allocate MSI/MSI-X vectors\n", 2840 vectors); 2841 return vectors; 2842 } 2843 if (vectors < hdev->num_msi) 2844 dev_warn(&hdev->pdev->dev, 2845 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2846 hdev->num_msi, 
vectors); 2847 2848 hdev->num_msi = vectors; 2849 hdev->num_msi_left = vectors; 2850 2851 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2852 sizeof(u16), GFP_KERNEL); 2853 if (!hdev->vector_status) { 2854 pci_free_irq_vectors(pdev); 2855 return -ENOMEM; 2856 } 2857 2858 for (i = 0; i < hdev->num_msi; i++) 2859 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2860 2861 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2862 sizeof(int), GFP_KERNEL); 2863 if (!hdev->vector_irq) { 2864 devm_kfree(&pdev->dev, hdev->vector_status); 2865 pci_free_irq_vectors(pdev); 2866 return -ENOMEM; 2867 } 2868 2869 return 0; 2870 } 2871 2872 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2873 { 2874 struct pci_dev *pdev = hdev->pdev; 2875 2876 devm_kfree(&pdev->dev, hdev->vector_status); 2877 devm_kfree(&pdev->dev, hdev->vector_irq); 2878 pci_free_irq_vectors(pdev); 2879 } 2880 2881 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2882 { 2883 int ret; 2884 2885 hclgevf_get_misc_vector(hdev); 2886 2887 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2888 HCLGEVF_NAME, pci_name(hdev->pdev)); 2889 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2890 0, hdev->misc_vector.name, hdev); 2891 if (ret) { 2892 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2893 hdev->misc_vector.vector_irq); 2894 return ret; 2895 } 2896 2897 hclgevf_clear_event_cause(hdev, 0); 2898 2899 /* enable misc. vector(vector 0) */ 2900 hclgevf_enable_vector(&hdev->misc_vector, true); 2901 2902 return ret; 2903 } 2904 2905 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2906 { 2907 /* disable misc vector(vector 0) */ 2908 hclgevf_enable_vector(&hdev->misc_vector, false); 2909 synchronize_irq(hdev->misc_vector.vector_irq); 2910 free_irq(hdev->misc_vector.vector_irq, hdev); 2911 hclgevf_free_vector(hdev, 0); 2912 } 2913 2914 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2915 { 2916 struct device *dev = &hdev->pdev->dev; 2917 2918 dev_info(dev, "VF info begin:\n"); 2919 2920 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2921 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2922 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2923 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2924 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2925 dev_info(dev, "PF media type of this VF: %u\n", 2926 hdev->hw.mac.media_type); 2927 2928 dev_info(dev, "VF info end.\n"); 2929 } 2930 2931 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2932 struct hnae3_client *client) 2933 { 2934 struct hclgevf_dev *hdev = ae_dev->priv; 2935 int rst_cnt = hdev->rst_stats.rst_cnt; 2936 int ret; 2937 2938 ret = client->ops->init_instance(&hdev->nic); 2939 if (ret) 2940 return ret; 2941 2942 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2943 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2944 rst_cnt != hdev->rst_stats.rst_cnt) { 2945 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2946 2947 client->ops->uninit_instance(&hdev->nic, 0); 2948 return -EBUSY; 2949 } 2950 2951 hnae3_set_client_init_flag(client, ae_dev, 1); 2952 2953 if (netif_msg_drv(&hdev->nic)) 2954 hclgevf_info_show(hdev); 2955 2956 return 0; 2957 } 2958 2959 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2960 struct hnae3_client *client) 2961 { 2962 struct hclgevf_dev *hdev = ae_dev->priv; 2963 int ret; 2964 2965 if 
(!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2966 !hdev->nic_client) 2967 return 0; 2968 2969 ret = hclgevf_init_roce_base_info(hdev); 2970 if (ret) 2971 return ret; 2972 2973 ret = client->ops->init_instance(&hdev->roce); 2974 if (ret) 2975 return ret; 2976 2977 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2978 hnae3_set_client_init_flag(client, ae_dev, 1); 2979 2980 return 0; 2981 } 2982 2983 static int hclgevf_init_client_instance(struct hnae3_client *client, 2984 struct hnae3_ae_dev *ae_dev) 2985 { 2986 struct hclgevf_dev *hdev = ae_dev->priv; 2987 int ret; 2988 2989 switch (client->type) { 2990 case HNAE3_CLIENT_KNIC: 2991 hdev->nic_client = client; 2992 hdev->nic.client = client; 2993 2994 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2995 if (ret) 2996 goto clear_nic; 2997 2998 ret = hclgevf_init_roce_client_instance(ae_dev, 2999 hdev->roce_client); 3000 if (ret) 3001 goto clear_roce; 3002 3003 break; 3004 case HNAE3_CLIENT_ROCE: 3005 if (hnae3_dev_roce_supported(hdev)) { 3006 hdev->roce_client = client; 3007 hdev->roce.client = client; 3008 } 3009 3010 ret = hclgevf_init_roce_client_instance(ae_dev, client); 3011 if (ret) 3012 goto clear_roce; 3013 3014 break; 3015 default: 3016 return -EINVAL; 3017 } 3018 3019 return 0; 3020 3021 clear_nic: 3022 hdev->nic_client = NULL; 3023 hdev->nic.client = NULL; 3024 return ret; 3025 clear_roce: 3026 hdev->roce_client = NULL; 3027 hdev->roce.client = NULL; 3028 return ret; 3029 } 3030 3031 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 3032 struct hnae3_ae_dev *ae_dev) 3033 { 3034 struct hclgevf_dev *hdev = ae_dev->priv; 3035 3036 /* un-init the roce client, if it exists */ 3037 if (hdev->roce_client) { 3038 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 3039 msleep(HCLGEVF_WAIT_RESET_DONE); 3040 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 3041 3042 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 3043 hdev->roce_client = NULL; 3044 hdev->roce.client = NULL; 3045 } 3046 3047 /* un-init the nic/unic client, if this was not called by the roce client */ 3048 if (client->ops->uninit_instance && hdev->nic_client && 3049 client->type != HNAE3_CLIENT_ROCE) { 3050 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 3051 msleep(HCLGEVF_WAIT_RESET_DONE); 3052 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 3053 3054 client->ops->uninit_instance(&hdev->nic, 0); 3055 hdev->nic_client = NULL; 3056 hdev->nic.client = NULL; 3057 } 3058 } 3059
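/* PCI BAR usage, noted here for clarity: BAR 2 is mapped as io_base for the configuration registers in hclgevf_pci_init() below, while the optional device memory lives in BAR 4 (HCLGEVF_MEM_BAR) and, when present, is mapped write-combined by hclgevf_dev_mem_map(). */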
3060 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) 3061 { 3062 #define HCLGEVF_MEM_BAR 4 3063 3064 struct pci_dev *pdev = hdev->pdev; 3065 struct hclgevf_hw *hw = &hdev->hw; 3066 3067 /* for a device that has no device memory, return directly */ 3068 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) 3069 return 0; 3070 3071 hw->hw.mem_base = 3072 devm_ioremap_wc(&pdev->dev, 3073 pci_resource_start(pdev, HCLGEVF_MEM_BAR), 3074 pci_resource_len(pdev, HCLGEVF_MEM_BAR)); 3075 if (!hw->hw.mem_base) { 3076 dev_err(&pdev->dev, "failed to map device memory\n"); 3077 return -EFAULT; 3078 } 3079 3080 return 0; 3081 } 3082 3083 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 3084 { 3085 struct pci_dev *pdev = hdev->pdev; 3086 struct hclgevf_hw *hw; 3087 int ret; 3088 3089 ret = pci_enable_device(pdev); 3090 if (ret) { 3091 dev_err(&pdev->dev, "failed to enable PCI device\n"); 3092 return ret; 3093 } 3094 3095 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3096 if (ret) { 3097 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n"); 3098 goto err_disable_device; 3099 } 3100 3101 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 3102 if (ret) { 3103 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 3104 goto err_disable_device; 3105 } 3106 3107 pci_set_master(pdev); 3108 hw = &hdev->hw; 3109 hw->hw.io_base = pci_iomap(pdev, 2, 0); 3110 if (!hw->hw.io_base) { 3111 dev_err(&pdev->dev, "can't map configuration register space\n"); 3112 ret = -ENOMEM; 3113 goto err_clr_master; 3114 } 3115 3116 ret = hclgevf_dev_mem_map(hdev); 3117 if (ret) 3118 goto err_unmap_io_base; 3119 3120 return 0; 3121 3122 err_unmap_io_base: 3123 pci_iounmap(pdev, hdev->hw.hw.io_base); 3124 err_clr_master: 3125 pci_clear_master(pdev); 3126 pci_release_regions(pdev); 3127 err_disable_device: 3128 pci_disable_device(pdev); 3129 3130 return ret; 3131 } 3132 3133 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 3134 { 3135 struct pci_dev *pdev = hdev->pdev; 3136 3137 if (hdev->hw.hw.mem_base) 3138 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); 3139 3140 pci_iounmap(pdev, hdev->hw.hw.io_base); 3141 pci_clear_master(pdev); 3142 pci_release_regions(pdev); 3143 pci_disable_device(pdev); 3144 } 3145 3146 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 3147 { 3148 struct hclgevf_query_res_cmd *req; 3149 struct hclge_desc desc; 3150 int ret; 3151 3152 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 3153 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 3154 if (ret) { 3155 dev_err(&hdev->pdev->dev, 3156 "query vf resource failed, ret = %d.\n", ret); 3157 return ret; 3158 } 3159 3160 req = (struct hclgevf_query_res_cmd *)desc.data; 3161 3162 if (hnae3_dev_roce_supported(hdev)) { 3163 hdev->roce_base_msix_offset = 3164 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 3165 HCLGEVF_MSIX_OFT_ROCEE_M, 3166 HCLGEVF_MSIX_OFT_ROCEE_S); 3167 hdev->num_roce_msix = 3168 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3169 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3170 3171 /* the nic's msix number always equals the roce's. */ 3172 hdev->num_nic_msix = hdev->num_roce_msix; 3173 3174 /* the VF has both NIC vectors and RoCE vectors, with the NIC 3175 * vectors queued before the RoCE vectors. The offset is fixed to 64.
3176 */ 3177 hdev->num_msi = hdev->num_roce_msix + 3178 hdev->roce_base_msix_offset; 3179 } else { 3180 hdev->num_msi = 3181 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3182 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3183 3184 hdev->num_nic_msix = hdev->num_msi; 3185 } 3186 3187 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 3188 dev_err(&hdev->pdev->dev, 3189 "Just %u msi resources, not enough for vf(min:2).\n", 3190 hdev->num_nic_msix); 3191 return -EINVAL; 3192 } 3193 3194 return 0; 3195 } 3196 3197 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 3198 { 3199 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 3200 3201 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3202 3203 ae_dev->dev_specs.max_non_tso_bd_num = 3204 HCLGEVF_MAX_NON_TSO_BD_NUM; 3205 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3206 ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3207 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3208 ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3209 } 3210 3211 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 3212 struct hclge_desc *desc) 3213 { 3214 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3215 struct hclgevf_dev_specs_0_cmd *req0; 3216 struct hclgevf_dev_specs_1_cmd *req1; 3217 3218 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 3219 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 3220 3221 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 3222 ae_dev->dev_specs.rss_ind_tbl_size = 3223 le16_to_cpu(req0->rss_ind_tbl_size); 3224 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 3225 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 3226 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 3227 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 3228 } 3229 3230 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 3231 { 3232 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 3233 3234 if (!dev_specs->max_non_tso_bd_num) 3235 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 3236 if (!dev_specs->rss_ind_tbl_size) 3237 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3238 if (!dev_specs->rss_key_size) 3239 dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3240 if (!dev_specs->max_int_gl) 3241 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3242 if (!dev_specs->max_frm_size) 3243 dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3244 } 3245 3246 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 3247 { 3248 struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 3249 int ret; 3250 int i; 3251 3252 /* set default specifications as devices lower than version V3 do not 3253 * support querying specifications from firmware. 
3254 */ 3255 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 3256 hclgevf_set_default_dev_specs(hdev); 3257 return 0; 3258 } 3259 3260 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3261 hclgevf_cmd_setup_basic_desc(&desc[i], 3262 HCLGEVF_OPC_QUERY_DEV_SPECS, true); 3263 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3264 } 3265 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, 3266 true); 3267 3268 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 3269 if (ret) 3270 return ret; 3271 3272 hclgevf_parse_dev_specs(hdev, desc); 3273 hclgevf_check_dev_specs(hdev); 3274 3275 return 0; 3276 } 3277 3278 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 3279 { 3280 struct pci_dev *pdev = hdev->pdev; 3281 int ret = 0; 3282 3283 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 3284 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3285 hclgevf_misc_irq_uninit(hdev); 3286 hclgevf_uninit_msi(hdev); 3287 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3288 } 3289 3290 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3291 pci_set_master(pdev); 3292 ret = hclgevf_init_msi(hdev); 3293 if (ret) { 3294 dev_err(&pdev->dev, 3295 "failed(%d) to init MSI/MSI-X\n", ret); 3296 return ret; 3297 } 3298 3299 ret = hclgevf_misc_irq_init(hdev); 3300 if (ret) { 3301 hclgevf_uninit_msi(hdev); 3302 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 3303 ret); 3304 return ret; 3305 } 3306 3307 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3308 } 3309 3310 return ret; 3311 } 3312 3313 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 3314 { 3315 struct hclge_vf_to_pf_msg send_msg; 3316 3317 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 3318 HCLGE_MBX_VPORT_LIST_CLEAR); 3319 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3320 } 3321 3322 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) 3323 { 3324 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 3325 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); 3326 } 3327 3328 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) 3329 { 3330 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 3331 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); 3332 } 3333 3334 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 3335 { 3336 struct pci_dev *pdev = hdev->pdev; 3337 int ret; 3338 3339 ret = hclgevf_pci_reset(hdev); 3340 if (ret) { 3341 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 3342 return ret; 3343 } 3344 3345 hclgevf_arq_init(hdev); 3346 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 3347 &hdev->fw_version, false, 3348 hdev->reset_pending); 3349 if (ret) { 3350 dev_err(&pdev->dev, "cmd failed %d\n", ret); 3351 return ret; 3352 } 3353 3354 ret = hclgevf_rss_init_hw(hdev); 3355 if (ret) { 3356 dev_err(&hdev->pdev->dev, 3357 "failed(%d) to initialize RSS\n", ret); 3358 return ret; 3359 } 3360 3361 ret = hclgevf_config_gro(hdev); 3362 if (ret) 3363 return ret; 3364 3365 ret = hclgevf_init_vlan_config(hdev); 3366 if (ret) { 3367 dev_err(&hdev->pdev->dev, 3368 "failed(%d) to initialize VLAN config\n", ret); 3369 return ret; 3370 } 3371 3372 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3373 3374 hclgevf_init_rxd_adv_layout(hdev); 3375 3376 dev_info(&hdev->pdev->dev, "Reset done\n"); 3377 3378 return 0; 3379 } 3380 3381 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 3382 { 3383 struct pci_dev *pdev = hdev->pdev; 3384 int ret; 3385 3386 ret = 
hclgevf_pci_init(hdev); 3387 if (ret) 3388 return ret; 3389 3390 ret = hclgevf_devlink_init(hdev); 3391 if (ret) 3392 goto err_devlink_init; 3393 3394 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); 3395 if (ret) 3396 goto err_cmd_queue_init; 3397 3398 hclgevf_arq_init(hdev); 3399 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 3400 &hdev->fw_version, false, 3401 hdev->reset_pending); 3402 if (ret) 3403 goto err_cmd_init; 3404 3405 /* Get vf resource */ 3406 ret = hclgevf_query_vf_resource(hdev); 3407 if (ret) 3408 goto err_cmd_init; 3409 3410 ret = hclgevf_query_dev_specs(hdev); 3411 if (ret) { 3412 dev_err(&pdev->dev, 3413 "failed to query dev specifications, ret = %d\n", ret); 3414 goto err_cmd_init; 3415 } 3416 3417 ret = hclgevf_init_msi(hdev); 3418 if (ret) { 3419 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 3420 goto err_cmd_init; 3421 } 3422 3423 hclgevf_state_init(hdev); 3424 hdev->reset_level = HNAE3_VF_FUNC_RESET; 3425 hdev->reset_type = HNAE3_NONE_RESET; 3426 3427 ret = hclgevf_misc_irq_init(hdev); 3428 if (ret) 3429 goto err_misc_irq_init; 3430 3431 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3432 3433 ret = hclgevf_configure(hdev); 3434 if (ret) { 3435 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 3436 goto err_config; 3437 } 3438 3439 ret = hclgevf_alloc_tqps(hdev); 3440 if (ret) { 3441 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 3442 goto err_config; 3443 } 3444 3445 ret = hclgevf_set_handle_info(hdev); 3446 if (ret) 3447 goto err_config; 3448 3449 ret = hclgevf_config_gro(hdev); 3450 if (ret) 3451 goto err_config; 3452 3453 /* Initialize RSS for this VF */ 3454 ret = hclgevf_rss_init_cfg(hdev); 3455 if (ret) { 3456 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 3457 goto err_config; 3458 } 3459 3460 ret = hclgevf_rss_init_hw(hdev); 3461 if (ret) { 3462 dev_err(&hdev->pdev->dev, 3463 "failed(%d) to initialize RSS\n", ret); 3464 goto err_config; 3465 } 3466 3467 /* ensure the vf tbl list is empty before init */ 3468 ret = hclgevf_clear_vport_list(hdev); 3469 if (ret) { 3470 dev_err(&pdev->dev, 3471 "failed to clear tbl list configuration, ret = %d.\n", 3472 ret); 3473 goto err_config; 3474 } 3475 3476 ret = hclgevf_init_vlan_config(hdev); 3477 if (ret) { 3478 dev_err(&hdev->pdev->dev, 3479 "failed(%d) to initialize VLAN config\n", ret); 3480 goto err_config; 3481 } 3482 3483 hclgevf_init_rxd_adv_layout(hdev); 3484 3485 set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); 3486 3487 hdev->last_reset_time = jiffies; 3488 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 3489 HCLGEVF_DRIVER_NAME); 3490 3491 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 3492 3493 return 0; 3494 3495 err_config: 3496 hclgevf_misc_irq_uninit(hdev); 3497 err_misc_irq_init: 3498 hclgevf_state_uninit(hdev); 3499 hclgevf_uninit_msi(hdev); 3500 err_cmd_init: 3501 hclge_comm_cmd_uninit(hdev->ae_dev, false, &hdev->hw.hw); 3502 err_cmd_queue_init: 3503 hclgevf_devlink_uninit(hdev); 3504 err_devlink_init: 3505 hclgevf_pci_uninit(hdev); 3506 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3507 return ret; 3508 } 3509 3510 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 3511 { 3512 struct hclge_vf_to_pf_msg send_msg; 3513 3514 hclgevf_state_uninit(hdev); 3515 hclgevf_uninit_rxd_adv_layout(hdev); 3516 3517 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3518 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3519 3520 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3521 hclgevf_misc_irq_uninit(hdev); 3522 hclgevf_uninit_msi(hdev); 3523 } 3524 3525 hclge_comm_cmd_uninit(hdev->ae_dev, false, &hdev->hw.hw); 3526 hclgevf_devlink_uninit(hdev); 3527 hclgevf_pci_uninit(hdev); 3528 hclgevf_uninit_mac_list(hdev); 3529 } 3530 3531 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 3532 { 3533 struct pci_dev *pdev = ae_dev->pdev; 3534 int ret; 3535 3536 ret = hclgevf_alloc_hdev(ae_dev); 3537 if (ret) { 3538 dev_err(&pdev->dev, "hclge device allocation failed\n"); 3539 return ret; 3540 } 3541 3542 ret = hclgevf_init_hdev(ae_dev->priv); 3543 if (ret) { 3544 dev_err(&pdev->dev, "hclge device initialization failed\n"); 3545 return ret; 3546 } 3547 3548 return 0; 3549 } 3550 3551 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3552 { 3553 struct hclgevf_dev *hdev = ae_dev->priv; 3554 3555 hclgevf_uninit_hdev(hdev); 3556 ae_dev->priv = NULL; 3557 } 3558 3559 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3560 { 3561 struct hnae3_handle *nic = &hdev->nic; 3562 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3563 3564 return min_t(u32, hdev->rss_size_max, 3565 hdev->num_tqps / kinfo->tc_info.num_tc); 3566 } 3567 3568 /** 3569 * hclgevf_get_channels - Get the current channels enabled and max supported. 3570 * @handle: hardware information for network interface 3571 * @ch: ethtool channels structure 3572 * 3573 * We don't support separate tx and rx queues as channels. The other count 3574 * represents how many queues are being used for control. max_combined counts 3575 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3576 * q_vectors since we support a lot more queue pairs than q_vectors. 3577 **/ 3578 static void hclgevf_get_channels(struct hnae3_handle *handle, 3579 struct ethtool_channels *ch) 3580 { 3581 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3582 3583 ch->max_combined = hclgevf_get_max_channels(hdev); 3584 ch->other_count = 0; 3585 ch->max_other = 0; 3586 ch->combined_count = handle->kinfo.rss_size; 3587 } 3588 3589 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3590 u16 *alloc_tqps, u16 *max_rss_size) 3591 { 3592 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3593 3594 *alloc_tqps = hdev->num_tqps; 3595 *max_rss_size = hdev->rss_size_max; 3596 } 3597 3598 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3599 u32 new_tqps_num) 3600 { 3601 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3602 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3603 u16 max_rss_size; 3604 3605 kinfo->req_rss_size = new_tqps_num; 3606 3607 max_rss_size = min_t(u16, hdev->rss_size_max, 3608 hdev->num_tqps / kinfo->tc_info.num_tc); 3609 3610 /* Use the user's configuration when it is not larger than 3611 * max_rss_size, otherwise, use the maximum specification value. 
3612 */ 3613 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 3614 kinfo->req_rss_size <= max_rss_size) 3615 kinfo->rss_size = kinfo->req_rss_size; 3616 else if (kinfo->rss_size > max_rss_size || 3617 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3618 kinfo->rss_size = max_rss_size; 3619 3620 kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 3621 } 3622 3623 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 3624 bool rxfh_configured) 3625 { 3626 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3627 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3628 u16 cur_rss_size = kinfo->rss_size; 3629 u16 cur_tqps = kinfo->num_tqps; 3630 u32 *rss_indir; 3631 unsigned int i; 3632 int ret; 3633 3634 hclgevf_update_rss_size(handle, new_tqps_num); 3635 3636 ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 3637 if (ret) 3638 return ret; 3639 3640 /* RSS indirection table has been configured by user */ 3641 if (rxfh_configured) 3642 goto out; 3643 3644 /* Reinitializes the rss indirect table according to the new RSS size */ 3645 rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 3646 sizeof(u32), GFP_KERNEL); 3647 if (!rss_indir) 3648 return -ENOMEM; 3649 3650 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 3651 rss_indir[i] = i % kinfo->rss_size; 3652 3653 hdev->rss_cfg.rss_size = kinfo->rss_size; 3654 3655 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 3656 if (ret) 3657 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 3658 ret); 3659 3660 kfree(rss_indir); 3661 3662 out: 3663 if (!ret) 3664 dev_info(&hdev->pdev->dev, 3665 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 3666 cur_rss_size, kinfo->rss_size, 3667 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 3668 3669 return ret; 3670 } 3671 3672 static int hclgevf_get_status(struct hnae3_handle *handle) 3673 { 3674 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3675 3676 return hdev->hw.mac.link; 3677 } 3678 3679 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3680 u8 *auto_neg, u32 *speed, 3681 u8 *duplex) 3682 { 3683 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3684 3685 if (speed) 3686 *speed = hdev->hw.mac.speed; 3687 if (duplex) 3688 *duplex = hdev->hw.mac.duplex; 3689 if (auto_neg) 3690 *auto_neg = AUTONEG_DISABLE; 3691 } 3692 3693 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3694 u8 duplex) 3695 { 3696 hdev->hw.mac.speed = speed; 3697 hdev->hw.mac.duplex = duplex; 3698 } 3699 3700 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3701 { 3702 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3703 bool gro_en_old = hdev->gro_en; 3704 int ret; 3705 3706 hdev->gro_en = enable; 3707 ret = hclgevf_config_gro(hdev); 3708 if (ret) 3709 hdev->gro_en = gro_en_old; 3710 3711 return ret; 3712 } 3713 3714 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3715 u8 *module_type) 3716 { 3717 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3718 3719 if (media_type) 3720 *media_type = hdev->hw.mac.media_type; 3721 3722 if (module_type) 3723 *module_type = hdev->hw.mac.module_type; 3724 } 3725 3726 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3727 { 3728 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3729 3730 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3731 } 3732 3733 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3734 { 3735 
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3736 3737 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3738 } 3739 3740 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3741 { 3742 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3743 3744 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3745 } 3746 3747 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3748 { 3749 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3750 3751 return hdev->rst_stats.hw_rst_done_cnt; 3752 } 3753 3754 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3755 unsigned long *supported, 3756 unsigned long *advertising) 3757 { 3758 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3759 3760 *supported = hdev->hw.mac.supported; 3761 *advertising = hdev->hw.mac.advertising; 3762 } 3763 3764 #define MAX_SEPARATE_NUM 4 3765 #define SEPARATOR_VALUE 0xFDFCFBFA 3766 #define REG_NUM_PER_LINE 4 3767 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 3768 3769 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 3770 { 3771 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 3772 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3773 3774 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 3775 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 3776 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 3777 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 3778 3779 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 3780 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 3781 } 3782 3783 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 3784 void *data) 3785 { 3786 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3787 int i, j, reg_um, separator_num; 3788 u32 *reg = data; 3789 3790 *version = hdev->fw_version; 3791 3792 /* fetch the per-VF register values from the VF PCIe register space */ 3793 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 3794 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3795 for (i = 0; i < reg_um; i++) 3796 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 3797 for (i = 0; i < separator_num; i++) 3798 *reg++ = SEPARATOR_VALUE; 3799 3800 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 3801 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3802 for (i = 0; i < reg_um; i++) 3803 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 3804 for (i = 0; i < separator_num; i++) 3805 *reg++ = SEPARATOR_VALUE; 3806 3807 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 3808 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3809 for (j = 0; j < hdev->num_tqps; j++) { 3810 for (i = 0; i < reg_um; i++) 3811 *reg++ = hclgevf_read_dev(&hdev->hw, 3812 ring_reg_addr_list[i] + 3813 0x200 * j); 3814 for (i = 0; i < separator_num; i++) 3815 *reg++ = SEPARATOR_VALUE; 3816 } 3817 3818 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 3819 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3820 for (j = 0; j < hdev->num_msi_used - 1; j++) { 3821 for (i = 0; i < reg_um; i++) 3822 *reg++ = hclgevf_read_dev(&hdev->hw, 3823 tqp_intr_reg_addr_list[i] + 3824 4 * j); 3825 for (i = 0; i < separator_num; i++) 3826 *reg++ = SEPARATOR_VALUE; 3827 } 3828 } 3829
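/* Worked example for the register dump layout above, added for clarity: each register group is padded with SEPARATOR_VALUE words up to a multiple of REG_NUM_PER_LINE (4 u32s). For the 14-entry cmdq_reg_addr_list, hclgevf_get_regs_len() computes 56 / 16 + 1 = 4 lines (64 bytes), and hclgevf_get_regs() emits the 14 register values plus 4 - (14 % 4) = 2 separators, i.e. the same 16 u32s. */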
3830 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 3831 u8 *port_base_vlan_info, u8 data_size) 3832 { 3833 struct hnae3_handle *nic = &hdev->nic; 3834 struct hclge_vf_to_pf_msg send_msg; 3835 int ret; 3836 3837 rtnl_lock(); 3838 3839 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 3840 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { 3841 dev_warn(&hdev->pdev->dev, 3842 "is resetting when updating port based vlan info\n"); 3843 rtnl_unlock(); 3844 return; 3845 } 3846 3847 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3848 if (ret) { 3849 rtnl_unlock(); 3850 return; 3851 } 3852 3853 /* send msg to the PF and wait for it to update the port based vlan info */ 3854 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3855 HCLGE_MBX_PORT_BASE_VLAN_CFG); 3856 memcpy(send_msg.data, port_base_vlan_info, data_size); 3857 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3858 if (!ret) { 3859 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3860 nic->port_base_vlan_state = state; 3861 else 3862 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3863 } 3864 3865 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 3866 rtnl_unlock(); 3867 } 3868 3869 static const struct hnae3_ae_ops hclgevf_ops = { 3870 .init_ae_dev = hclgevf_init_ae_dev, 3871 .uninit_ae_dev = hclgevf_uninit_ae_dev, 3872 .reset_prepare = hclgevf_reset_prepare_general, 3873 .reset_done = hclgevf_reset_done, 3874 .init_client_instance = hclgevf_init_client_instance, 3875 .uninit_client_instance = hclgevf_uninit_client_instance, 3876 .start = hclgevf_ae_start, 3877 .stop = hclgevf_ae_stop, 3878 .client_start = hclgevf_client_start, 3879 .client_stop = hclgevf_client_stop, 3880 .map_ring_to_vector = hclgevf_map_ring_to_vector, 3881 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3882 .get_vector = hclgevf_get_vector, 3883 .put_vector = hclgevf_put_vector, 3884 .reset_queue = hclgevf_reset_tqp, 3885 .get_mac_addr = hclgevf_get_mac_addr, 3886 .set_mac_addr = hclgevf_set_mac_addr, 3887 .add_uc_addr = hclgevf_add_uc_addr, 3888 .rm_uc_addr = hclgevf_rm_uc_addr, 3889 .add_mc_addr = hclgevf_add_mc_addr, 3890 .rm_mc_addr = hclgevf_rm_mc_addr, 3891 .get_stats = hclgevf_get_stats, 3892 .update_stats = hclgevf_update_stats, 3893 .get_strings = hclgevf_get_strings, 3894 .get_sset_count = hclgevf_get_sset_count, 3895 .get_rss_key_size = hclgevf_get_rss_key_size, 3896 .get_rss = hclgevf_get_rss, 3897 .set_rss = hclgevf_set_rss, 3898 .get_rss_tuple = hclgevf_get_rss_tuple, 3899 .set_rss_tuple = hclgevf_set_rss_tuple, 3900 .get_tc_size = hclgevf_get_tc_size, 3901 .get_fw_version = hclgevf_get_fw_version, 3902 .set_vlan_filter = hclgevf_set_vlan_filter, 3903 .enable_vlan_filter = hclgevf_enable_vlan_filter, 3904 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3905 .reset_event = hclgevf_reset_event, 3906 .set_default_reset_request = hclgevf_set_def_reset_request, 3907 .set_channels = hclgevf_set_channels, 3908 .get_channels = hclgevf_get_channels, 3909 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3910 .get_regs_len = hclgevf_get_regs_len, 3911 .get_regs = hclgevf_get_regs, 3912 .get_status = hclgevf_get_status, 3913 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3914 .get_media_type = hclgevf_get_media_type, 3915 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 3916 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3917 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 3918 .set_gro_en = hclgevf_gro_en, 3919 .set_mtu = hclgevf_set_mtu, 3920 .get_global_queue_id = hclgevf_get_qid_global, 3921 .set_timer_task = hclgevf_set_timer_task, 3922 .get_link_mode = hclgevf_get_link_mode, 3923 .set_promisc_mode = hclgevf_set_promisc_mode,
3924 .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3925 .get_cmdq_stat = hclgevf_get_cmdq_stat, 3926 }; 3927 3928 static struct hnae3_ae_algo ae_algovf = { 3929 .ops = &hclgevf_ops, 3930 .pdev_id_table = ae_algovf_pci_tbl, 3931 }; 3932 3933 static int hclgevf_init(void) 3934 { 3935 pr_info("%s is initializing\n", HCLGEVF_NAME); 3936 3937 hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); 3938 if (!hclgevf_wq) { 3939 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3940 return -ENOMEM; 3941 } 3942 3943 hnae3_register_ae_algo(&ae_algovf); 3944 3945 return 0; 3946 } 3947 3948 static void hclgevf_exit(void) 3949 { 3950 hnae3_unregister_ae_algo(&ae_algovf); 3951 destroy_workqueue(hclgevf_wq); 3952 } 3953 module_init(hclgevf_init); 3954 module_exit(hclgevf_exit); 3955 3956 MODULE_LICENSE("GPL"); 3957 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3958 MODULE_DESCRIPTION("HCLGEVF Driver"); 3959 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3960
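/* Module entry points, summarized here for clarity: hclgevf_init() allocates the shared unbound workqueue used by every VF's service task before registering the algo with the HNAE3 framework, and hclgevf_exit() unregisters the algo first so that no new devices can bind while the workqueue is being destroyed. */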