// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
					 HCLGE_COMM_CMDQ_INTR_EN_REG,
					 HCLGE_COMM_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue; it posts the
 * prefilled descriptors to the queue and cleans the queue afterwards.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
	struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;

	spin_lock(&cmdq->crq.lock);
	/* initialize the pointers of async rx queue of mailbox */
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	spin_unlock(&cmdq->crq.lock);
}
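
/* A VF owns two hnae3 handles, one for the NIC client and one for the
 * RoCE client; both are embedded in struct hclgevf_dev, so the owning
 * device can be recovered from either handle with container_of().
 */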

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}
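
/* Note the ordering contract: hclgevf_tqps_get_strings() above emits all
 * TX queue names before the RX queue names, matching the value order
 * produced by hclgevf_tqps_get_stats().
 */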

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}

static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
	struct hclge_basic_info *basic_info;
	struct hclge_vf_to_pf_msg send_msg;
	unsigned long caps;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"failed to get basic info from pf, ret = %d", status);
		return status;
	}

	basic_info = (struct hclge_basic_info *)resp_msg;

	hdev->hw_tc_map = basic_info->hw_tc_map;
	hdev->mbx_api_version = basic_info->mbx_api_version;
	caps = basic_info->pf_caps;
	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}
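
/* The HCLGE_MBX_GET_QINFO response parsed above packs three u16 fields
 * into the byte stream: tqp count at offset 0, max RSS size at offset 2
 * and RX buffer length at offset 4.
 */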

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
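
/* hclgevf_knic_setup() below derives the kinfo sizing from what the PF
 * granted: rss_size is clamped to rss_size_max and to an even split of
 * the TQPs across TCs. For example, 16 TQPs with 2 TCs and rss_size_max
 * of 8 give rss_size 8 and num_tqps 16, while 3 TCs would give rss_size
 * 5 and num_tqps 15; both are then further clamped by the number of
 * available MSI-X vectors.
 */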

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;
	u8 num_tc = 0;

	kinfo = &nic->kinfo;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			num_tc++;

	num_tc = num_tc ? num_tc : 1;
	kinfo->tc_info.num_tc = num_tc;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
	new_tqps = kinfo->rss_size * num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after initializing the max rss_size and tqp count, adjust the
	 * default tqp count and rss size against the actual vector count
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
				kinfo->rss_size);

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
	if (link_state != hdev->hw.mac.link) {
		hdev->hw.mac.link = link_state;
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}
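
/* Vector 0 is the misc vector, so hclgevf_get_vector() below hands out
 * TQP vectors starting at index HCLGEVF_MISC_VECTOR_NUM + 1; the
 * per-vector register block is addressed with (i - 1) accordingly.
 */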

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclge_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u16 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int rss_cfg_tbl_num;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
			  HCLGEVF_RSS_CFG_TBL_SIZE;

	for (i = 0; i < rss_cfg_tbl_num; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index =
			cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}
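
/* The TC mode register encodes the per-TC queue region as a power of
 * two: tc_size below is ilog2(roundup_pow_of_two(rss_size)), so e.g. an
 * rss_size of 6 is programmed as tc_size 3 (a region of 8 queues).
 */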

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclge_desc desc;
	u16 roundup_size;
	unsigned int i;
	int status;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
			      tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
			      0x1);
		hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

/* for revision 0x20, vf shares the same rss config with pf */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
		  HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->rss_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 hash_algo;
	int ret, i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo);
		if (ret)
			return ret;

		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"invalid hfunc type %u\n", hfunc);
				return ret;
			}

			/* Update the shadow RSS key with the user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		} else {
			ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
						       rss_cfg->rss_hash_key);
			if (ret)
				return ret;
		}
		rss_cfg->rss_algo = hash_algo;
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
				      struct ethtool_rxnfc *nfc,
				      struct hclgevf_rss_input_tuple_cmd *req)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
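
/* The input-tuple command carries all eight per-flow-type enable fields
 * at once, so hclgevf_init_rss_tuple_cmd() above seeds every field from
 * the shadow copy before overriding the requested flow type; presumably
 * the other tuples would otherwise be rewritten as zero in hardware.
 */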

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init rss tuple cmd, ret = %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
{
	u64 tuple_data = 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		tuple_data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		tuple_data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		tuple_data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		tuple_data |= RXH_IP_SRC;

	return tuple_data;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 tuple_sets;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	nfc->data = 0;

	ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
				       &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclgevf_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclge_comm_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
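
/* Ring-to-vector (un)mapping is proxied to the PF over the mailbox;
 * hclgevf_bind_ring_to_vector() below walks the ring chain and flushes
 * a message every HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM entries, or at the
 * end of the chain.
 */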

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
			hnae3_get_field(node->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret = %d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}
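
/* Promiscuous mode is likewise requested from the PF; the message is
 * sent without waiting for a response, and a failed update is retried
 * by the periodic service task via hclgevf_sync_promisc_mode() for as
 * long as HCLGEVF_STATE_PROMISC_CHANGED stays set.
 */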

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;
	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
					     &handle->priv_flags) ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool en_bc_pmc;

	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
				       u16 stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclge_desc desc;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}

	return 0;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"failed to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}
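
/* A PF-provisioned MAC address takes precedence: hclgevf_get_mac_addr()
 * below reports the host MAC when it is non-zero and falls back to the
 * VF's own hdev->hw.mac.mac_addr otherwise.
 */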

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}
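
/* MAC address bookkeeping uses a small per-node state machine:
 *   TO_ADD:  requested by the stack, not yet pushed to the PF
 *   TO_DEL:  requested for removal, still configured in hardware
 *   ACTIVE:  configured in hardware
 * hclgevf_update_mac_node() above collapses opposing requests, e.g. a
 * TO_DEL arriving for a node still in TO_ADD simply drops the node.
 */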

static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address was never added, there is no need to delete it */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			hnae3_format_mac_addr(format_mac_addr,
					      mac_node->mac_addr);
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %s, state = %d, ret = %d\n",
				format_mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}
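
/* The two sync helpers below merge the temporary add/del lists back
 * into the main table after the mailbox requests have been issued,
 * reconciling any set_rx_mode changes that raced with the sync.
 */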

static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means we have received a TO_DEL request
		 * during the time window of sending the mac config request
		 * to the PF. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it will be removed at the next sync; if it is
		 * TO_ADD, the TO_ADD request failed, so just remove the mac
		 * node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * a new TO_ADD request was received during the time
			 * window of sending the mac addr config request to
			 * the PF, so just change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if some mac addresses failed to be added/deleted, move them back
	 * to the mac_list and retry at the next sync
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vf_to_pf_msg send_msg;

	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		return -EOPNOTSUPP;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_ENABLE_VLAN_FILTER);
	send_msg.data[0] = enable ? 1 : 0;

	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}
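
/* Payload layout of the VLAN filter mailbox message built below: byte 0
 * carries the is_kill flag, bytes 1-2 the VLAN id and bytes 3-4 the
 * protocol, per the offsets defined in hclgevf_set_vlan_filter().
 */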

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting or reset failed, firmware is unable
	 * to handle mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing the hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to stay consistent with
	 * the stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u8 return_status = 0;
	int ret;
	u16 i;

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(handle, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
			ret);
		return ret;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
				   sizeof(return_status));
	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
		return ret;

	for (i = 1; i < handle->kinfo.num_tqps; i++) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
		memcpy(send_msg.data, &i, sizeof(i));
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
				      enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	struct hnae3_handle *handle = &hdev->roce;
	int ret;

	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);
	return ret;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* wait a bit longer for the stack reset to complete; the reset may
	 * have been asserted by the PF, which also means the VF can end up
	 * waiting a bit longer even for a VF reset.
	 */
	msleep(5000);

	return 0;
}

static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
			  reg_val);
}
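
/* hclgevf_reset_stack() below tears the NIC client down, re-initialises
 * the hdev, brings the client back up, clears the IMP handshake bit and
 * finally re-enables TX/RX.
 */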
hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 1936 { 1937 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 1938 hdev->rst_stats.vf_func_rst_cnt); 1939 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1940 hdev->rst_stats.flr_rst_cnt); 1941 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 1942 hdev->rst_stats.vf_rst_cnt); 1943 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1944 hdev->rst_stats.rst_done_cnt); 1945 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1946 hdev->rst_stats.hw_rst_done_cnt); 1947 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1948 hdev->rst_stats.rst_cnt); 1949 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1950 hdev->rst_stats.rst_fail_cnt); 1951 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1952 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 1953 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 1954 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG)); 1955 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1956 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG)); 1957 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1958 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 1959 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1960 } 1961 1962 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1963 { 1964 /* recover handshake status with IMP when reset fails */ 1965 hclgevf_reset_handshake(hdev, true); 1966 hdev->rst_stats.rst_fail_cnt++; 1967 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1968 hdev->rst_stats.rst_fail_cnt); 1969 1970 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1971 set_bit(hdev->reset_type, &hdev->reset_pending); 1972 1973 if (hclgevf_is_reset_pending(hdev)) { 1974 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1975 hclgevf_reset_task_schedule(hdev); 1976 } else { 1977 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1978 hclgevf_dump_rst_info(hdev); 1979 } 1980 } 1981 1982 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 1983 { 1984 int ret; 1985 1986 hdev->rst_stats.rst_cnt++; 1987 1988 /* perform reset of the stack & ae device for a client */ 1989 ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 1990 if (ret) 1991 return ret; 1992 1993 rtnl_lock(); 1994 /* bring down the nic to stop any ongoing TX/RX */ 1995 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1996 rtnl_unlock(); 1997 if (ret) 1998 return ret; 1999 2000 return hclgevf_reset_prepare_wait(hdev); 2001 } 2002 2003 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 2004 { 2005 int ret; 2006 2007 hdev->rst_stats.hw_rst_done_cnt++; 2008 ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 2009 if (ret) 2010 return ret; 2011 2012 rtnl_lock(); 2013 /* now, re-initialize the nic client and ae device */ 2014 ret = hclgevf_reset_stack(hdev); 2015 rtnl_unlock(); 2016 if (ret) { 2017 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 2018 return ret; 2019 } 2020 2021 ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 2022 /* only ignore the RoCE notify error once it has already failed 2023 * HCLGEVF_RESET_MAX_FAIL_CNT - 1 times 2024 */ 2025 if (ret && 2026 hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) 2027 return ret; 2028 2029 ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); 2030 if (ret) 2031 return ret; 2032 2033 hdev->last_reset_time = jiffies; 2034 hdev->rst_stats.rst_done_cnt++; 2035 hdev->rst_stats.rst_fail_cnt = 0; 2036 clear_bit(HCLGEVF_STATE_RST_FAIL,
&hdev->state); 2037 2038 return 0; 2039 } 2040 2041 static void hclgevf_reset(struct hclgevf_dev *hdev) 2042 { 2043 if (hclgevf_reset_prepare(hdev)) 2044 goto err_reset; 2045 2046 /* check if VF could successfully fetch the hardware reset completion 2047 * status from the hardware 2048 */ 2049 if (hclgevf_reset_wait(hdev)) { 2050 /* can't do much in this situation, will disable VF */ 2051 dev_err(&hdev->pdev->dev, 2052 "failed to fetch H/W reset completion status\n"); 2053 goto err_reset; 2054 } 2055 2056 if (hclgevf_reset_rebuild(hdev)) 2057 goto err_reset; 2058 2059 return; 2060 2061 err_reset: 2062 hclgevf_reset_err_handle(hdev); 2063 } 2064 2065 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 2066 unsigned long *addr) 2067 { 2068 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2069 2070 /* return the highest priority reset level amongst all */ 2071 if (test_bit(HNAE3_VF_RESET, addr)) { 2072 rst_level = HNAE3_VF_RESET; 2073 clear_bit(HNAE3_VF_RESET, addr); 2074 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2075 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2076 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 2077 rst_level = HNAE3_VF_FULL_RESET; 2078 clear_bit(HNAE3_VF_FULL_RESET, addr); 2079 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2080 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 2081 rst_level = HNAE3_VF_PF_FUNC_RESET; 2082 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2083 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2084 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 2085 rst_level = HNAE3_VF_FUNC_RESET; 2086 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2087 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 2088 rst_level = HNAE3_FLR_RESET; 2089 clear_bit(HNAE3_FLR_RESET, addr); 2090 } 2091 2092 return rst_level; 2093 } 2094 2095 static void hclgevf_reset_event(struct pci_dev *pdev, 2096 struct hnae3_handle *handle) 2097 { 2098 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2099 struct hclgevf_dev *hdev = ae_dev->priv; 2100 2101 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 2102 2103 if (hdev->default_reset_request) 2104 hdev->reset_level = 2105 hclgevf_get_reset_level(hdev, 2106 &hdev->default_reset_request); 2107 else 2108 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2109 2110 /* reset of this VF requested */ 2111 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 2112 hclgevf_reset_task_schedule(hdev); 2113 2114 hdev->last_reset_time = jiffies; 2115 } 2116 2117 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 2118 enum hnae3_reset_type rst_type) 2119 { 2120 struct hclgevf_dev *hdev = ae_dev->priv; 2121 2122 set_bit(rst_type, &hdev->default_reset_request); 2123 } 2124 2125 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 2126 { 2127 writel(en ? 
1 : 0, vector->addr); 2128 } 2129 2130 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 2131 enum hnae3_reset_type rst_type) 2132 { 2133 #define HCLGEVF_RESET_RETRY_WAIT_MS 500 2134 #define HCLGEVF_RESET_RETRY_CNT 5 2135 2136 struct hclgevf_dev *hdev = ae_dev->priv; 2137 int retry_cnt = 0; 2138 int ret; 2139 2140 while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { 2141 down(&hdev->reset_sem); 2142 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2143 hdev->reset_type = rst_type; 2144 ret = hclgevf_reset_prepare(hdev); 2145 if (!ret && !hdev->reset_pending) 2146 break; 2147 2148 dev_err(&hdev->pdev->dev, 2149 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", 2150 ret, hdev->reset_pending, retry_cnt); 2151 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2152 up(&hdev->reset_sem); 2153 msleep(HCLGEVF_RESET_RETRY_WAIT_MS); 2154 } 2155 2156 /* disable misc vector before reset done */ 2157 hclgevf_enable_vector(&hdev->misc_vector, false); 2158 2159 if (hdev->reset_type == HNAE3_FLR_RESET) 2160 hdev->rst_stats.flr_rst_cnt++; 2161 } 2162 2163 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) 2164 { 2165 struct hclgevf_dev *hdev = ae_dev->priv; 2166 int ret; 2167 2168 hclgevf_enable_vector(&hdev->misc_vector, true); 2169 2170 ret = hclgevf_reset_rebuild(hdev); 2171 if (ret) 2172 dev_warn(&hdev->pdev->dev, "failed to rebuild, ret=%d\n", 2173 ret); 2174 2175 hdev->reset_type = HNAE3_NONE_RESET; 2176 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2177 up(&hdev->reset_sem); 2178 } 2179 2180 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 2181 { 2182 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2183 2184 return hdev->fw_version; 2185 } 2186 2187 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 2188 { 2189 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 2190 2191 vector->vector_irq = pci_irq_vector(hdev->pdev, 2192 HCLGEVF_MISC_VECTOR_NUM); 2193 vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 2194 /* vector status always valid for Vector 0 */ 2195 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 2196 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 2197 2198 hdev->num_msi_left -= 1; 2199 hdev->num_msi_used += 1; 2200 } 2201 2202 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 2203 { 2204 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2205 test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && 2206 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 2207 &hdev->state)) 2208 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2209 } 2210 2211 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 2212 { 2213 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2214 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 2215 &hdev->state)) 2216 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2217 } 2218 2219 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 2220 unsigned long delay) 2221 { 2222 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2223 !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2224 mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); 2225 } 2226 2227 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) 2228 { 2229 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 2230 2231 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) 2232 return; 2233 2234 down(&hdev->reset_sem); 2235 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2236 2237 if
(test_and_clear_bit(HCLGEVF_RESET_PENDING, 2238 &hdev->reset_state)) { 2239 /* PF has intimated that it is about to reset the hardware. 2240 * We now have to poll & check if hardware has actually 2241 * completed the reset sequence. On hardware reset completion, 2242 * VF needs to reset the client and ae device. 2243 */ 2244 hdev->reset_attempts = 0; 2245 2246 hdev->last_reset_time = jiffies; 2247 hdev->reset_type = 2248 hclgevf_get_reset_level(hdev, &hdev->reset_pending); 2249 if (hdev->reset_type != HNAE3_NONE_RESET) 2250 hclgevf_reset(hdev); 2251 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 2252 &hdev->reset_state)) { 2253 /* we could be here when either of the below happens: 2254 * 1. reset was initiated due to watchdog timeout caused by 2255 * a. IMP was earlier reset and our TX got choked down, which 2256 * resulted in the watchdog reacting and inducing VF 2257 * reset. This also means our cmdq would be unreliable. 2258 * b. problem in TX due to some other lower layer (e.g. link 2259 * layer not functioning properly). 2260 * 2. VF reset might have been initiated due to some config 2261 * change. 2262 * 2263 * NOTE: There's no better way to detect the above cases than to 2264 * react to the response of PF for this reset request. PF will ack 2265 * cases 1b and 2, but we will not get any intimation about 1a 2266 * from PF as cmdq would be in unreliable state i.e. mailbox 2267 * communication between PF and VF would be broken. 2268 * 2269 * If we never get into the pending state it means either: 2270 * 1. PF is not receiving our request, which could be due to IMP 2271 * reset 2272 * 2. PF is screwed 2273 * We cannot do much for case 2, but as a first check we can try 2274 * resetting our PCIe + stack and see if it alleviates the problem. 2275 */ 2276 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 2277 /* prepare for full reset of stack + pcie interface */ 2278 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 2279 2280 /* "defer" schedule the reset task again */ 2281 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2282 } else { 2283 hdev->reset_attempts++; 2284 2285 set_bit(hdev->reset_level, &hdev->reset_pending); 2286 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2287 } 2288 hclgevf_reset_task_schedule(hdev); 2289 } 2290 2291 hdev->reset_type = HNAE3_NONE_RESET; 2292 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2293 up(&hdev->reset_sem); 2294 } 2295 2296 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 2297 { 2298 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2299 return; 2300 2301 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 2302 return; 2303 2304 hclgevf_mbx_async_handler(hdev); 2305 2306 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2307 } 2308 2309 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 2310 { 2311 struct hclge_vf_to_pf_msg send_msg; 2312 int ret; 2313 2314 if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) 2315 return; 2316 2317 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 2318 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2319 if (ret) 2320 dev_err(&hdev->pdev->dev, 2321 "VF sends keep alive cmd failed(=%d)\n", ret); 2322 } 2323 2324 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 2325 { 2326 unsigned long delta = round_jiffies_relative(HZ); 2327 struct hnae3_handle *handle = &hdev->nic; 2328 2329 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2330 return; 2331 2332 if
(time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 2333 delta = jiffies - hdev->last_serv_processed; 2334 2335 if (delta < round_jiffies_relative(HZ)) { 2336 delta = round_jiffies_relative(HZ) - delta; 2337 goto out; 2338 } 2339 } 2340 2341 hdev->serv_processed_cnt++; 2342 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 2343 hclgevf_keep_alive(hdev); 2344 2345 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 2346 hdev->last_serv_processed = jiffies; 2347 goto out; 2348 } 2349 2350 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 2351 hclgevf_tqps_update_stats(handle); 2352 2353 /* VF does not need to request link status when this bit is set, because 2354 * PF will push its link status to VFs when the link status changes. 2355 */ 2356 if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) 2357 hclgevf_request_link_info(hdev); 2358 2359 hclgevf_update_link_mode(hdev); 2360 2361 hclgevf_sync_vlan_filter(hdev); 2362 2363 hclgevf_sync_mac_table(hdev); 2364 2365 hclgevf_sync_promisc_mode(hdev); 2366 2367 hdev->last_serv_processed = jiffies; 2368 2369 out: 2370 hclgevf_task_schedule(hdev, delta); 2371 } 2372 2373 static void hclgevf_service_task(struct work_struct *work) 2374 { 2375 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 2376 service_task.work); 2377 2378 hclgevf_reset_service_task(hdev); 2379 hclgevf_mailbox_service_task(hdev); 2380 hclgevf_periodic_service_task(hdev); 2381 2382 /* Handle reset and mbx again in case periodical task delays the 2383 * handling by calling hclgevf_task_schedule() in 2384 * hclgevf_periodic_service_task() 2385 */ 2386 hclgevf_reset_service_task(hdev); 2387 hclgevf_mailbox_service_task(hdev); 2388 } 2389 2390 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 2391 { 2392 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr); 2393 } 2394 2395 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 2396 u32 *clearval) 2397 { 2398 u32 val, cmdq_stat_reg, rst_ing_reg; 2399 2400 /* fetch the events from their corresponding regs */ 2401 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 2402 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG); 2403 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 2404 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2405 dev_info(&hdev->pdev->dev, 2406 "received reset interrupt 0x%x!\n", rst_ing_reg); 2407 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 2408 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2409 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 2410 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 2411 hdev->rst_stats.vf_rst_cnt++; 2412 /* set up VF hardware reset status; the PF will clear 2413 * this status once it has completed its initialization. 2414 */ 2415 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 2416 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 2417 val | HCLGEVF_VF_RST_ING_BIT); 2418 return HCLGEVF_VECTOR0_EVENT_RST; 2419 } 2420 2421 /* check for vector0 mailbox(=CMDQ RX) event source */ 2422 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 2423 /* for revision 0x21, writing 0 to a bit of the clear 2424 * register clears the interrupt, while writing 1 keeps the 2425 * old value. 2426 * for revision 0x20, the clear register is a read & write 2427 * register, so we should just write 0 to the bit we are 2428 * handling, and keep other bits as cmdq_stat_reg.
2429 */ 2430 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2431 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2432 else 2433 *clearval = cmdq_stat_reg & 2434 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2435 2436 return HCLGEVF_VECTOR0_EVENT_MBX; 2437 } 2438 2439 /* print other vector0 event source */ 2440 dev_info(&hdev->pdev->dev, 2441 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2442 cmdq_stat_reg); 2443 2444 return HCLGEVF_VECTOR0_EVENT_OTHER; 2445 } 2446 2447 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2448 { 2449 enum hclgevf_evt_cause event_cause; 2450 struct hclgevf_dev *hdev = data; 2451 u32 clearval; 2452 2453 hclgevf_enable_vector(&hdev->misc_vector, false); 2454 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2455 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2456 hclgevf_clear_event_cause(hdev, clearval); 2457 2458 switch (event_cause) { 2459 case HCLGEVF_VECTOR0_EVENT_RST: 2460 hclgevf_reset_task_schedule(hdev); 2461 break; 2462 case HCLGEVF_VECTOR0_EVENT_MBX: 2463 hclgevf_mbx_handler(hdev); 2464 break; 2465 default: 2466 break; 2467 } 2468 2469 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2470 hclgevf_enable_vector(&hdev->misc_vector, true); 2471 2472 return IRQ_HANDLED; 2473 } 2474 2475 static int hclgevf_configure(struct hclgevf_dev *hdev) 2476 { 2477 int ret; 2478 2479 hdev->gro_en = true; 2480 2481 ret = hclgevf_get_basic_info(hdev); 2482 if (ret) 2483 return ret; 2484 2485 /* get current port based vlan state from PF */ 2486 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2487 if (ret) 2488 return ret; 2489 2490 /* get queue configuration from PF */ 2491 ret = hclgevf_get_queue_info(hdev); 2492 if (ret) 2493 return ret; 2494 2495 /* get queue depth info from PF */ 2496 ret = hclgevf_get_queue_depth(hdev); 2497 if (ret) 2498 return ret; 2499 2500 return hclgevf_get_pf_media_type(hdev); 2501 } 2502 2503 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2504 { 2505 struct pci_dev *pdev = ae_dev->pdev; 2506 struct hclgevf_dev *hdev; 2507 2508 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2509 if (!hdev) 2510 return -ENOMEM; 2511 2512 hdev->pdev = pdev; 2513 hdev->ae_dev = ae_dev; 2514 ae_dev->priv = hdev; 2515 2516 return 0; 2517 } 2518 2519 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2520 { 2521 struct hnae3_handle *roce = &hdev->roce; 2522 struct hnae3_handle *nic = &hdev->nic; 2523 2524 roce->rinfo.num_vectors = hdev->num_roce_msix; 2525 2526 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2527 hdev->num_msi_left == 0) 2528 return -EINVAL; 2529 2530 roce->rinfo.base_vector = hdev->roce_base_msix_offset; 2531 2532 roce->rinfo.netdev = nic->kinfo.netdev; 2533 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; 2534 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; 2535 2536 roce->pdev = nic->pdev; 2537 roce->ae_algo = nic->ae_algo; 2538 roce->numa_node_mask = nic->numa_node_mask; 2539 2540 return 0; 2541 } 2542 2543 static int hclgevf_config_gro(struct hclgevf_dev *hdev) 2544 { 2545 struct hclgevf_cfg_gro_status_cmd *req; 2546 struct hclge_desc desc; 2547 int ret; 2548 2549 if (!hnae3_dev_gro_supported(hdev)) 2550 return 0; 2551 2552 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2553 false); 2554 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2555 2556 req->gro_en = hdev->gro_en ? 
1 : 0; 2557 2558 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2559 if (ret) 2560 dev_err(&hdev->pdev->dev, 2561 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2562 2563 return ret; 2564 } 2565 2566 static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) 2567 { 2568 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size; 2569 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 2570 struct hclge_comm_rss_tuple_cfg *tuple_sets; 2571 u32 i; 2572 2573 rss_cfg->rss_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 2574 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2575 tuple_sets = &rss_cfg->rss_tuple_sets; 2576 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2577 u16 *rss_ind_tbl; 2578 2579 rss_cfg->rss_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2580 2581 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size, 2582 sizeof(*rss_ind_tbl), GFP_KERNEL); 2583 if (!rss_ind_tbl) 2584 return -ENOMEM; 2585 2586 rss_cfg->rss_indirection_tbl = rss_ind_tbl; 2587 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2588 HCLGEVF_RSS_KEY_SIZE); 2589 2590 tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2591 tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2592 tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2593 tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2594 tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2595 tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2596 tuple_sets->ipv6_sctp_en = 2597 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 2598 HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT : 2599 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2600 tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2601 } 2602 2603 /* Initialize RSS indirect table */ 2604 for (i = 0; i < rss_ind_tbl_size; i++) 2605 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2606 2607 return 0; 2608 } 2609 2610 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2611 { 2612 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 2613 int ret; 2614 2615 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2616 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->rss_algo, 2617 rss_cfg->rss_hash_key); 2618 if (ret) 2619 return ret; 2620 2621 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2622 if (ret) 2623 return ret; 2624 } 2625 2626 ret = hclgevf_set_rss_indir_table(hdev); 2627 if (ret) 2628 return ret; 2629 2630 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2631 } 2632 2633 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2634 { 2635 struct hnae3_handle *nic = &hdev->nic; 2636 int ret; 2637 2638 ret = hclgevf_en_hw_strip_rxvtag(nic, true); 2639 if (ret) { 2640 dev_err(&hdev->pdev->dev, 2641 "failed to enable rx vlan offload, ret = %d\n", ret); 2642 return ret; 2643 } 2644 2645 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2646 false); 2647 } 2648 2649 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2650 { 2651 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2652 2653 unsigned long last = hdev->serv_processed_cnt; 2654 int i = 0; 2655 2656 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2657 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2658 last == hdev->serv_processed_cnt) 2659 usleep_range(1, 1); 2660 } 2661 2662 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2663 { 2664 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2665 2666 if (enable) { 2667 hclgevf_task_schedule(hdev, 0); 2668 } else { 2669 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2670 2671 /* 
flush memory to make sure DOWN is seen by service task */ 2672 smp_mb__before_atomic(); 2673 hclgevf_flush_link_update(hdev); 2674 } 2675 } 2676 2677 static int hclgevf_ae_start(struct hnae3_handle *handle) 2678 { 2679 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2680 2681 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2682 clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); 2683 2684 hclgevf_reset_tqp_stats(handle); 2685 2686 hclgevf_request_link_info(hdev); 2687 2688 hclgevf_update_link_mode(hdev); 2689 2690 return 0; 2691 } 2692 2693 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2694 { 2695 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2696 2697 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2698 2699 if (hdev->reset_type != HNAE3_VF_RESET) 2700 hclgevf_reset_tqp(handle); 2701 2702 hclgevf_reset_tqp_stats(handle); 2703 hclgevf_update_link_status(hdev, 0); 2704 } 2705 2706 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2707 { 2708 #define HCLGEVF_STATE_ALIVE 1 2709 #define HCLGEVF_STATE_NOT_ALIVE 0 2710 2711 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2712 struct hclge_vf_to_pf_msg send_msg; 2713 2714 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2715 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2716 HCLGEVF_STATE_NOT_ALIVE; 2717 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2718 } 2719 2720 static int hclgevf_client_start(struct hnae3_handle *handle) 2721 { 2722 return hclgevf_set_alive(handle, true); 2723 } 2724 2725 static void hclgevf_client_stop(struct hnae3_handle *handle) 2726 { 2727 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2728 int ret; 2729 2730 ret = hclgevf_set_alive(handle, false); 2731 if (ret) 2732 dev_warn(&hdev->pdev->dev, 2733 "%s failed %d\n", __func__, ret); 2734 } 2735 2736 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2737 { 2738 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2739 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2740 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2741 2742 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2743 2744 mutex_init(&hdev->mbx_resp.mbx_mutex); 2745 sema_init(&hdev->reset_sem, 1); 2746 2747 spin_lock_init(&hdev->mac_table.mac_list_lock); 2748 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2749 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2750 2751 /* bring the device down */ 2752 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2753 } 2754 2755 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2756 { 2757 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2758 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2759 2760 if (hdev->service_task.work.func) 2761 cancel_delayed_work_sync(&hdev->service_task); 2762 2763 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2764 } 2765 2766 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2767 { 2768 struct pci_dev *pdev = hdev->pdev; 2769 int vectors; 2770 int i; 2771 2772 if (hnae3_dev_roce_supported(hdev)) 2773 vectors = pci_alloc_irq_vectors(pdev, 2774 hdev->roce_base_msix_offset + 1, 2775 hdev->num_msi, 2776 PCI_IRQ_MSIX); 2777 else 2778 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2779 hdev->num_msi, 2780 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2781 2782 if (vectors < 0) { 2783 dev_err(&pdev->dev, 2784 "failed(%d) to allocate MSI/MSI-X vectors\n", 2785 vectors); 2786 return vectors; 2787 } 2788 if (vectors < hdev->num_msi) 2789 dev_warn(&hdev->pdev->dev, 2790 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2791 hdev->num_msi, 
vectors); 2792 2793 hdev->num_msi = vectors; 2794 hdev->num_msi_left = vectors; 2795 2796 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2797 sizeof(u16), GFP_KERNEL); 2798 if (!hdev->vector_status) { 2799 pci_free_irq_vectors(pdev); 2800 return -ENOMEM; 2801 } 2802 2803 for (i = 0; i < hdev->num_msi; i++) 2804 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2805 2806 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2807 sizeof(int), GFP_KERNEL); 2808 if (!hdev->vector_irq) { 2809 devm_kfree(&pdev->dev, hdev->vector_status); 2810 pci_free_irq_vectors(pdev); 2811 return -ENOMEM; 2812 } 2813 2814 return 0; 2815 } 2816 2817 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2818 { 2819 struct pci_dev *pdev = hdev->pdev; 2820 2821 devm_kfree(&pdev->dev, hdev->vector_status); 2822 devm_kfree(&pdev->dev, hdev->vector_irq); 2823 pci_free_irq_vectors(pdev); 2824 } 2825 2826 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2827 { 2828 int ret; 2829 2830 hclgevf_get_misc_vector(hdev); 2831 2832 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2833 HCLGEVF_NAME, pci_name(hdev->pdev)); 2834 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2835 0, hdev->misc_vector.name, hdev); 2836 if (ret) { 2837 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2838 hdev->misc_vector.vector_irq); 2839 return ret; 2840 } 2841 2842 hclgevf_clear_event_cause(hdev, 0); 2843 2844 /* enable misc. vector(vector 0) */ 2845 hclgevf_enable_vector(&hdev->misc_vector, true); 2846 2847 return ret; 2848 } 2849 2850 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2851 { 2852 /* disable misc vector(vector 0) */ 2853 hclgevf_enable_vector(&hdev->misc_vector, false); 2854 synchronize_irq(hdev->misc_vector.vector_irq); 2855 free_irq(hdev->misc_vector.vector_irq, hdev); 2856 hclgevf_free_vector(hdev, 0); 2857 } 2858 2859 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2860 { 2861 struct device *dev = &hdev->pdev->dev; 2862 2863 dev_info(dev, "VF info begin:\n"); 2864 2865 dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps); 2866 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2867 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2868 dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport); 2869 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2870 dev_info(dev, "PF media type of this VF: %u\n", 2871 hdev->hw.mac.media_type); 2872 2873 dev_info(dev, "VF info end.\n"); 2874 } 2875 2876 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2877 struct hnae3_client *client) 2878 { 2879 struct hclgevf_dev *hdev = ae_dev->priv; 2880 int rst_cnt = hdev->rst_stats.rst_cnt; 2881 int ret; 2882 2883 ret = client->ops->init_instance(&hdev->nic); 2884 if (ret) 2885 return ret; 2886 2887 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2888 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2889 rst_cnt != hdev->rst_stats.rst_cnt) { 2890 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2891 2892 client->ops->uninit_instance(&hdev->nic, 0); 2893 return -EBUSY; 2894 } 2895 2896 hnae3_set_client_init_flag(client, ae_dev, 1); 2897 2898 if (netif_msg_drv(&hdev->nic)) 2899 hclgevf_info_show(hdev); 2900 2901 return 0; 2902 } 2903 2904 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2905 struct hnae3_client *client) 2906 { 2907 struct hclgevf_dev *hdev = ae_dev->priv; 2908 int ret; 2909 2910 if
(!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2911 !hdev->nic_client) 2912 return 0; 2913 2914 ret = hclgevf_init_roce_base_info(hdev); 2915 if (ret) 2916 return ret; 2917 2918 ret = client->ops->init_instance(&hdev->roce); 2919 if (ret) 2920 return ret; 2921 2922 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2923 hnae3_set_client_init_flag(client, ae_dev, 1); 2924 2925 return 0; 2926 } 2927 2928 static int hclgevf_init_client_instance(struct hnae3_client *client, 2929 struct hnae3_ae_dev *ae_dev) 2930 { 2931 struct hclgevf_dev *hdev = ae_dev->priv; 2932 int ret; 2933 2934 switch (client->type) { 2935 case HNAE3_CLIENT_KNIC: 2936 hdev->nic_client = client; 2937 hdev->nic.client = client; 2938 2939 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2940 if (ret) 2941 goto clear_nic; 2942 2943 ret = hclgevf_init_roce_client_instance(ae_dev, 2944 hdev->roce_client); 2945 if (ret) 2946 goto clear_roce; 2947 2948 break; 2949 case HNAE3_CLIENT_ROCE: 2950 if (hnae3_dev_roce_supported(hdev)) { 2951 hdev->roce_client = client; 2952 hdev->roce.client = client; 2953 } 2954 2955 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2956 if (ret) 2957 goto clear_roce; 2958 2959 break; 2960 default: 2961 return -EINVAL; 2962 } 2963 2964 return 0; 2965 2966 clear_nic: 2967 hdev->nic_client = NULL; 2968 hdev->nic.client = NULL; 2969 return ret; 2970 clear_roce: 2971 hdev->roce_client = NULL; 2972 hdev->roce.client = NULL; 2973 return ret; 2974 } 2975 2976 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2977 struct hnae3_ae_dev *ae_dev) 2978 { 2979 struct hclgevf_dev *hdev = ae_dev->priv; 2980 2981 /* un-init roce, if it exists */ 2982 if (hdev->roce_client) { 2983 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2984 msleep(HCLGEVF_WAIT_RESET_DONE); 2985 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2986 2987 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2988 hdev->roce_client = NULL; 2989 hdev->roce.client = NULL; 2990 } 2991 2992 /* un-init nic/unic, if this was not called by roce client */ 2993 if (client->ops->uninit_instance && hdev->nic_client && 2994 client->type != HNAE3_CLIENT_ROCE) { 2995 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2996 msleep(HCLGEVF_WAIT_RESET_DONE); 2997 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2998 2999 client->ops->uninit_instance(&hdev->nic, 0); 3000 hdev->nic_client = NULL; 3001 hdev->nic.client = NULL; 3002 } 3003 } 3004 3005 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) 3006 { 3007 #define HCLGEVF_MEM_BAR 4 3008 3009 struct pci_dev *pdev = hdev->pdev; 3010 struct hclgevf_hw *hw = &hdev->hw; 3011 3012 /* if the device does not have device memory, return directly */ 3013 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) 3014 return 0; 3015 3016 hw->hw.mem_base = 3017 devm_ioremap_wc(&pdev->dev, 3018 pci_resource_start(pdev, HCLGEVF_MEM_BAR), 3019 pci_resource_len(pdev, HCLGEVF_MEM_BAR)); 3020 if (!hw->hw.mem_base) { 3021 dev_err(&pdev->dev, "failed to map device memory\n"); 3022 return -EFAULT; 3023 } 3024 3025 return 0; 3026 } 3027 3028 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 3029 { 3030 struct pci_dev *pdev = hdev->pdev; 3031 struct hclgevf_hw *hw; 3032 int ret; 3033 3034 ret = pci_enable_device(pdev); 3035 if (ret) { 3036 dev_err(&pdev->dev, "failed to enable PCI device\n"); 3037 return ret; 3038 } 3039 3040 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3041 if (ret) { 3042 dev_err(&pdev->dev, "can't
set consistent PCI DMA, exiting\n"); 3043 goto err_disable_device; 3044 } 3045 3046 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 3047 if (ret) { 3048 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 3049 goto err_disable_device; 3050 } 3051 3052 pci_set_master(pdev); 3053 hw = &hdev->hw; 3054 hw->hw.io_base = pci_iomap(pdev, 2, 0); 3055 if (!hw->hw.io_base) { 3056 dev_err(&pdev->dev, "can't map configuration register space\n"); 3057 ret = -ENOMEM; 3058 goto err_clr_master; 3059 } 3060 3061 ret = hclgevf_dev_mem_map(hdev); 3062 if (ret) 3063 goto err_unmap_io_base; 3064 3065 return 0; 3066 3067 err_unmap_io_base: 3068 pci_iounmap(pdev, hdev->hw.hw.io_base); 3069 err_clr_master: 3070 pci_clear_master(pdev); 3071 pci_release_regions(pdev); 3072 err_disable_device: 3073 pci_disable_device(pdev); 3074 3075 return ret; 3076 } 3077 3078 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 3079 { 3080 struct pci_dev *pdev = hdev->pdev; 3081 3082 if (hdev->hw.hw.mem_base) 3083 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); 3084 3085 pci_iounmap(pdev, hdev->hw.hw.io_base); 3086 pci_clear_master(pdev); 3087 pci_release_regions(pdev); 3088 pci_disable_device(pdev); 3089 } 3090 3091 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 3092 { 3093 struct hclgevf_query_res_cmd *req; 3094 struct hclge_desc desc; 3095 int ret; 3096 3097 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 3098 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 3099 if (ret) { 3100 dev_err(&hdev->pdev->dev, 3101 "query vf resource failed, ret = %d.\n", ret); 3102 return ret; 3103 } 3104 3105 req = (struct hclgevf_query_res_cmd *)desc.data; 3106 3107 if (hnae3_dev_roce_supported(hdev)) { 3108 hdev->roce_base_msix_offset = 3109 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 3110 HCLGEVF_MSIX_OFT_ROCEE_M, 3111 HCLGEVF_MSIX_OFT_ROCEE_S); 3112 hdev->num_roce_msix = 3113 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3114 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3115 3116 /* the nic's msix number always equals the roce's. */ 3117 hdev->num_nic_msix = hdev->num_roce_msix; 3118 3119 /* VF should have both NIC vectors and RoCE vectors; NIC vectors 3120 * are queued before RoCE vectors. The offset is fixed to 64.
3121 */ 3122 hdev->num_msi = hdev->num_roce_msix + 3123 hdev->roce_base_msix_offset; 3124 } else { 3125 hdev->num_msi = 3126 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3127 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3128 3129 hdev->num_nic_msix = hdev->num_msi; 3130 } 3131 3132 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 3133 dev_err(&hdev->pdev->dev, 3134 "Just %u msi resources, not enough for vf(min:2).\n", 3135 hdev->num_nic_msix); 3136 return -EINVAL; 3137 } 3138 3139 return 0; 3140 } 3141 3142 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 3143 { 3144 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 3145 3146 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3147 3148 ae_dev->dev_specs.max_non_tso_bd_num = 3149 HCLGEVF_MAX_NON_TSO_BD_NUM; 3150 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3151 ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3152 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3153 ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3154 } 3155 3156 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 3157 struct hclge_desc *desc) 3158 { 3159 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3160 struct hclgevf_dev_specs_0_cmd *req0; 3161 struct hclgevf_dev_specs_1_cmd *req1; 3162 3163 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 3164 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 3165 3166 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 3167 ae_dev->dev_specs.rss_ind_tbl_size = 3168 le16_to_cpu(req0->rss_ind_tbl_size); 3169 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 3170 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 3171 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 3172 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 3173 } 3174 3175 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 3176 { 3177 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 3178 3179 if (!dev_specs->max_non_tso_bd_num) 3180 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 3181 if (!dev_specs->rss_ind_tbl_size) 3182 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3183 if (!dev_specs->rss_key_size) 3184 dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3185 if (!dev_specs->max_int_gl) 3186 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3187 if (!dev_specs->max_frm_size) 3188 dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3189 } 3190 3191 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 3192 { 3193 struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 3194 int ret; 3195 int i; 3196 3197 /* set default specifications as devices lower than version V3 do not 3198 * support querying specifications from firmware. 
3199 */ 3200 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 3201 hclgevf_set_default_dev_specs(hdev); 3202 return 0; 3203 } 3204 3205 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3206 hclgevf_cmd_setup_basic_desc(&desc[i], 3207 HCLGEVF_OPC_QUERY_DEV_SPECS, true); 3208 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3209 } 3210 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, 3211 true); 3212 3213 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 3214 if (ret) 3215 return ret; 3216 3217 hclgevf_parse_dev_specs(hdev, desc); 3218 hclgevf_check_dev_specs(hdev); 3219 3220 return 0; 3221 } 3222 3223 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 3224 { 3225 struct pci_dev *pdev = hdev->pdev; 3226 int ret = 0; 3227 3228 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 3229 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3230 hclgevf_misc_irq_uninit(hdev); 3231 hclgevf_uninit_msi(hdev); 3232 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3233 } 3234 3235 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3236 pci_set_master(pdev); 3237 ret = hclgevf_init_msi(hdev); 3238 if (ret) { 3239 dev_err(&pdev->dev, 3240 "failed(%d) to init MSI/MSI-X\n", ret); 3241 return ret; 3242 } 3243 3244 ret = hclgevf_misc_irq_init(hdev); 3245 if (ret) { 3246 hclgevf_uninit_msi(hdev); 3247 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 3248 ret); 3249 return ret; 3250 } 3251 3252 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3253 } 3254 3255 return ret; 3256 } 3257 3258 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 3259 { 3260 struct hclge_vf_to_pf_msg send_msg; 3261 3262 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 3263 HCLGE_MBX_VPORT_LIST_CLEAR); 3264 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3265 } 3266 3267 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) 3268 { 3269 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 3270 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); 3271 } 3272 3273 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) 3274 { 3275 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 3276 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); 3277 } 3278 3279 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 3280 { 3281 struct pci_dev *pdev = hdev->pdev; 3282 int ret; 3283 3284 ret = hclgevf_pci_reset(hdev); 3285 if (ret) { 3286 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 3287 return ret; 3288 } 3289 3290 hclgevf_arq_init(hdev); 3291 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 3292 &hdev->fw_version, false, 3293 hdev->reset_pending); 3294 if (ret) { 3295 dev_err(&pdev->dev, "cmd failed %d\n", ret); 3296 return ret; 3297 } 3298 3299 ret = hclgevf_rss_init_hw(hdev); 3300 if (ret) { 3301 dev_err(&hdev->pdev->dev, 3302 "failed(%d) to initialize RSS\n", ret); 3303 return ret; 3304 } 3305 3306 ret = hclgevf_config_gro(hdev); 3307 if (ret) 3308 return ret; 3309 3310 ret = hclgevf_init_vlan_config(hdev); 3311 if (ret) { 3312 dev_err(&hdev->pdev->dev, 3313 "failed(%d) to initialize VLAN config\n", ret); 3314 return ret; 3315 } 3316 3317 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3318 3319 hclgevf_init_rxd_adv_layout(hdev); 3320 3321 dev_info(&hdev->pdev->dev, "Reset done\n"); 3322 3323 return 0; 3324 } 3325 3326 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 3327 { 3328 struct pci_dev *pdev = hdev->pdev; 3329 int ret; 3330 3331 ret = 
hclgevf_pci_init(hdev); 3332 if (ret) 3333 return ret; 3334 3335 ret = hclgevf_devlink_init(hdev); 3336 if (ret) 3337 goto err_devlink_init; 3338 3339 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); 3340 if (ret) 3341 goto err_cmd_queue_init; 3342 3343 hclgevf_arq_init(hdev); 3344 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 3345 &hdev->fw_version, false, 3346 hdev->reset_pending); 3347 if (ret) 3348 goto err_cmd_init; 3349 3350 /* Get vf resource */ 3351 ret = hclgevf_query_vf_resource(hdev); 3352 if (ret) 3353 goto err_cmd_init; 3354 3355 ret = hclgevf_query_dev_specs(hdev); 3356 if (ret) { 3357 dev_err(&pdev->dev, 3358 "failed to query dev specifications, ret = %d\n", ret); 3359 goto err_cmd_init; 3360 } 3361 3362 ret = hclgevf_init_msi(hdev); 3363 if (ret) { 3364 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 3365 goto err_cmd_init; 3366 } 3367 3368 hclgevf_state_init(hdev); 3369 hdev->reset_level = HNAE3_VF_FUNC_RESET; 3370 hdev->reset_type = HNAE3_NONE_RESET; 3371 3372 ret = hclgevf_misc_irq_init(hdev); 3373 if (ret) 3374 goto err_misc_irq_init; 3375 3376 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3377 3378 ret = hclgevf_configure(hdev); 3379 if (ret) { 3380 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 3381 goto err_config; 3382 } 3383 3384 ret = hclgevf_alloc_tqps(hdev); 3385 if (ret) { 3386 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 3387 goto err_config; 3388 } 3389 3390 ret = hclgevf_set_handle_info(hdev); 3391 if (ret) 3392 goto err_config; 3393 3394 ret = hclgevf_config_gro(hdev); 3395 if (ret) 3396 goto err_config; 3397 3398 /* Initialize RSS for this VF */ 3399 ret = hclgevf_rss_init_cfg(hdev); 3400 if (ret) { 3401 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 3402 goto err_config; 3403 } 3404 3405 ret = hclgevf_rss_init_hw(hdev); 3406 if (ret) { 3407 dev_err(&hdev->pdev->dev, 3408 "failed(%d) to initialize RSS\n", ret); 3409 goto err_config; 3410 } 3411 3412 /* ensure the vf tbl list is empty before init */ 3413 ret = hclgevf_clear_vport_list(hdev); 3414 if (ret) { 3415 dev_err(&pdev->dev, 3416 "failed to clear tbl list configuration, ret = %d.\n", 3417 ret); 3418 goto err_config; 3419 } 3420 3421 ret = hclgevf_init_vlan_config(hdev); 3422 if (ret) { 3423 dev_err(&hdev->pdev->dev, 3424 "failed(%d) to initialize VLAN config\n", ret); 3425 goto err_config; 3426 } 3427 3428 hclgevf_init_rxd_adv_layout(hdev); 3429 3430 set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); 3431 3432 hdev->last_reset_time = jiffies; 3433 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 3434 HCLGEVF_DRIVER_NAME); 3435 3436 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 3437 3438 return 0; 3439 3440 err_config: 3441 hclgevf_misc_irq_uninit(hdev); 3442 err_misc_irq_init: 3443 hclgevf_state_uninit(hdev); 3444 hclgevf_uninit_msi(hdev); 3445 err_cmd_init: 3446 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 3447 err_cmd_queue_init: 3448 hclgevf_devlink_uninit(hdev); 3449 err_devlink_init: 3450 hclgevf_pci_uninit(hdev); 3451 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3452 return ret; 3453 } 3454 3455 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 3456 { 3457 struct hclge_vf_to_pf_msg send_msg; 3458 3459 hclgevf_state_uninit(hdev); 3460 hclgevf_uninit_rxd_adv_layout(hdev); 3461 3462 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3463 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3464 3465 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3466
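/* Teardown mirrors hclgevf_init_hdev() in reverse: the VF first tells
 * the PF it is going away (HCLGE_MBX_VF_UNINIT above), then releases the
 * misc vector before the MSI/MSI-X pool it was taken from. The
 * HCLGEVF_STATE_IRQ_INITED check keeps this safe when a failed init has
 * already released the interrupt resources.
 */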
hclgevf_misc_irq_uninit(hdev); 3467 hclgevf_uninit_msi(hdev); 3468 } 3469 3470 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 3471 hclgevf_devlink_uninit(hdev); 3472 hclgevf_pci_uninit(hdev); 3473 hclgevf_uninit_mac_list(hdev); 3474 } 3475 3476 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 3477 { 3478 struct pci_dev *pdev = ae_dev->pdev; 3479 int ret; 3480 3481 ret = hclgevf_alloc_hdev(ae_dev); 3482 if (ret) { 3483 dev_err(&pdev->dev, "hclge device allocation failed\n"); 3484 return ret; 3485 } 3486 3487 ret = hclgevf_init_hdev(ae_dev->priv); 3488 if (ret) { 3489 dev_err(&pdev->dev, "hclge device initialization failed\n"); 3490 return ret; 3491 } 3492 3493 return 0; 3494 } 3495 3496 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3497 { 3498 struct hclgevf_dev *hdev = ae_dev->priv; 3499 3500 hclgevf_uninit_hdev(hdev); 3501 ae_dev->priv = NULL; 3502 } 3503 3504 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3505 { 3506 struct hnae3_handle *nic = &hdev->nic; 3507 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3508 3509 return min_t(u32, hdev->rss_size_max, 3510 hdev->num_tqps / kinfo->tc_info.num_tc); 3511 } 3512 3513 /** 3514 * hclgevf_get_channels - Get the current channels enabled and max supported. 3515 * @handle: hardware information for network interface 3516 * @ch: ethtool channels structure 3517 * 3518 * We don't support separate tx and rx queues as channels. The other count 3519 * represents how many queues are being used for control. max_combined counts 3520 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3521 * q_vectors since we support a lot more queue pairs than q_vectors. 3522 **/ 3523 static void hclgevf_get_channels(struct hnae3_handle *handle, 3524 struct ethtool_channels *ch) 3525 { 3526 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3527 3528 ch->max_combined = hclgevf_get_max_channels(hdev); 3529 ch->other_count = 0; 3530 ch->max_other = 0; 3531 ch->combined_count = handle->kinfo.rss_size; 3532 } 3533 3534 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3535 u16 *alloc_tqps, u16 *max_rss_size) 3536 { 3537 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3538 3539 *alloc_tqps = hdev->num_tqps; 3540 *max_rss_size = hdev->rss_size_max; 3541 } 3542 3543 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3544 u32 new_tqps_num) 3545 { 3546 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3547 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3548 u16 max_rss_size; 3549 3550 kinfo->req_rss_size = new_tqps_num; 3551 3552 max_rss_size = min_t(u16, hdev->rss_size_max, 3553 hdev->num_tqps / kinfo->tc_info.num_tc); 3554 3555 /* Use the user's configuration when it is not larger than 3556 * max_rss_size, otherwise, use the maximum specification value. 
3557 */ 3558 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 3559 kinfo->req_rss_size <= max_rss_size) 3560 kinfo->rss_size = kinfo->req_rss_size; 3561 else if (kinfo->rss_size > max_rss_size || 3562 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3563 kinfo->rss_size = max_rss_size; 3564 3565 kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 3566 } 3567 3568 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 3569 bool rxfh_configured) 3570 { 3571 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3572 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3573 u16 cur_rss_size = kinfo->rss_size; 3574 u16 cur_tqps = kinfo->num_tqps; 3575 u32 *rss_indir; 3576 unsigned int i; 3577 int ret; 3578 3579 hclgevf_update_rss_size(handle, new_tqps_num); 3580 3581 ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 3582 if (ret) 3583 return ret; 3584 3585 /* RSS indirection table has been configured by user */ 3586 if (rxfh_configured) 3587 goto out; 3588 3589 /* Reinitializes the rss indirect table according to the new RSS size */ 3590 rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 3591 sizeof(u32), GFP_KERNEL); 3592 if (!rss_indir) 3593 return -ENOMEM; 3594 3595 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 3596 rss_indir[i] = i % kinfo->rss_size; 3597 3598 hdev->rss_cfg.rss_size = kinfo->rss_size; 3599 3600 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 3601 if (ret) 3602 dev_err(&hdev->pdev->dev, "failed to set rss indir table, ret = %d\n", 3603 ret); 3604 3605 kfree(rss_indir); 3606 3607 out: 3608 if (!ret) 3609 dev_info(&hdev->pdev->dev, 3610 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n", 3611 cur_rss_size, kinfo->rss_size, 3612 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 3613 3614 return ret; 3615 } 3616 3617 static int hclgevf_get_status(struct hnae3_handle *handle) 3618 { 3619 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3620 3621 return hdev->hw.mac.link; 3622 } 3623 3624 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3625 u8 *auto_neg, u32 *speed, 3626 u8 *duplex) 3627 { 3628 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3629 3630 if (speed) 3631 *speed = hdev->hw.mac.speed; 3632 if (duplex) 3633 *duplex = hdev->hw.mac.duplex; 3634 if (auto_neg) 3635 *auto_neg = AUTONEG_DISABLE; 3636 } 3637 3638 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3639 u8 duplex) 3640 { 3641 hdev->hw.mac.speed = speed; 3642 hdev->hw.mac.duplex = duplex; 3643 } 3644 3645 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3646 { 3647 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3648 bool gro_en_old = hdev->gro_en; 3649 int ret; 3650 3651 hdev->gro_en = enable; 3652 ret = hclgevf_config_gro(hdev); 3653 if (ret) 3654 hdev->gro_en = gro_en_old; 3655 3656 return ret; 3657 } 3658 3659 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3660 u8 *module_type) 3661 { 3662 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3663 3664 if (media_type) 3665 *media_type = hdev->hw.mac.media_type; 3666 3667 if (module_type) 3668 *module_type = hdev->hw.mac.module_type; 3669 } 3670 3671 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3672 { 3673 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3674 3675 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3676 } 3677 3678 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3679 { 3680
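/* HCLGE_COMM_STATE_CMD_DISABLE is set in hclgevf_reset_prepare_wait()
 * and from the reset interrupt in hclgevf_check_evt_cause(); it is
 * presumably cleared again when the command queue is re-initialized
 * (not shown in this file).
 */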
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3681 3682 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3683 } 3684 3685 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3686 { 3687 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3688 3689 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3690 } 3691 3692 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3693 { 3694 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3695 3696 return hdev->rst_stats.hw_rst_done_cnt; 3697 } 3698 3699 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3700 unsigned long *supported, 3701 unsigned long *advertising) 3702 { 3703 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3704 3705 *supported = hdev->hw.mac.supported; 3706 *advertising = hdev->hw.mac.advertising; 3707 } 3708 3709 #define MAX_SEPARATE_NUM 4 3710 #define SEPARATOR_VALUE 0xFDFCFBFA 3711 #define REG_NUM_PER_LINE 4 3712 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 3713 3714 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 3715 { 3716 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 3717 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3718 3719 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 3720 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 3721 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 3722 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 3723 3724 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 3725 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 3726 } 3727 3728 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 3729 void *data) 3730 { 3731 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3732 int i, j, reg_num, separator_num; 3733 u32 *reg = data; 3734 3735 *version = hdev->fw_version; 3736 3737 /* fetch the per-VF register values from the VF PCIe register space */ 3738 reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32); 3739 separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE; 3740 for (i = 0; i < reg_num; i++) 3741 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 3742 for (i = 0; i < separator_num; i++) 3743 *reg++ = SEPARATOR_VALUE; 3744 3745 reg_num = sizeof(common_reg_addr_list) / sizeof(u32); 3746 separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE; 3747 for (i = 0; i < reg_num; i++) 3748 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 3749 for (i = 0; i < separator_num; i++) 3750 *reg++ = SEPARATOR_VALUE; 3751 3752 reg_num = sizeof(ring_reg_addr_list) / sizeof(u32); 3753 separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE; 3754 for (j = 0; j < hdev->num_tqps; j++) { 3755 for (i = 0; i < reg_num; i++) 3756 *reg++ = hclgevf_read_dev(&hdev->hw, 3757 ring_reg_addr_list[i] + 3758 0x200 * j); 3759 for (i = 0; i < separator_num; i++) 3760 *reg++ = SEPARATOR_VALUE; 3761 } 3762 3763 reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 3764 separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE; 3765 for (j = 0; j < hdev->num_msi_used - 1; j++) { 3766 for (i = 0; i < reg_num; i++) 3767 *reg++ = hclgevf_read_dev(&hdev->hw, 3768 tqp_intr_reg_addr_list[i] + 3769 4 * j); 3770 for (i = 0; i < separator_num; i++) 3771 *reg++ = SEPARATOR_VALUE; 3772 } 3773 } 3774 3775 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 3776 u8 *port_base_vlan_info, u8 data_size) 3777 { 3778 struct
hnae3_handle *nic = &hdev->nic; 3779 struct hclge_vf_to_pf_msg send_msg; 3780 int ret; 3781 3782 rtnl_lock(); 3783 3784 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 3785 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { 3786 dev_warn(&hdev->pdev->dev, 3787 "is resetting when updating port based vlan info\n"); 3788 rtnl_unlock(); 3789 return; 3790 } 3791 3792 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3793 if (ret) { 3794 rtnl_unlock(); 3795 return; 3796 } 3797 3798 /* send msg to PF and wait for the port based vlan info to be updated */ 3799 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3800 HCLGE_MBX_PORT_BASE_VLAN_CFG); 3801 memcpy(send_msg.data, port_base_vlan_info, data_size); 3802 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3803 if (!ret) { 3804 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3805 nic->port_base_vlan_state = state; 3806 else 3807 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3808 } 3809 3810 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 3811 rtnl_unlock(); 3812 } 3813 3814 static const struct hnae3_ae_ops hclgevf_ops = { 3815 .init_ae_dev = hclgevf_init_ae_dev, 3816 .uninit_ae_dev = hclgevf_uninit_ae_dev, 3817 .reset_prepare = hclgevf_reset_prepare_general, 3818 .reset_done = hclgevf_reset_done, 3819 .init_client_instance = hclgevf_init_client_instance, 3820 .uninit_client_instance = hclgevf_uninit_client_instance, 3821 .start = hclgevf_ae_start, 3822 .stop = hclgevf_ae_stop, 3823 .client_start = hclgevf_client_start, 3824 .client_stop = hclgevf_client_stop, 3825 .map_ring_to_vector = hclgevf_map_ring_to_vector, 3826 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3827 .get_vector = hclgevf_get_vector, 3828 .put_vector = hclgevf_put_vector, 3829 .reset_queue = hclgevf_reset_tqp, 3830 .get_mac_addr = hclgevf_get_mac_addr, 3831 .set_mac_addr = hclgevf_set_mac_addr, 3832 .add_uc_addr = hclgevf_add_uc_addr, 3833 .rm_uc_addr = hclgevf_rm_uc_addr, 3834 .add_mc_addr = hclgevf_add_mc_addr, 3835 .rm_mc_addr = hclgevf_rm_mc_addr, 3836 .get_stats = hclgevf_get_stats, 3837 .update_stats = hclgevf_update_stats, 3838 .get_strings = hclgevf_get_strings, 3839 .get_sset_count = hclgevf_get_sset_count, 3840 .get_rss_key_size = hclge_comm_get_rss_key_size, 3841 .get_rss = hclgevf_get_rss, 3842 .set_rss = hclgevf_set_rss, 3843 .get_rss_tuple = hclgevf_get_rss_tuple, 3844 .set_rss_tuple = hclgevf_set_rss_tuple, 3845 .get_tc_size = hclgevf_get_tc_size, 3846 .get_fw_version = hclgevf_get_fw_version, 3847 .set_vlan_filter = hclgevf_set_vlan_filter, 3848 .enable_vlan_filter = hclgevf_enable_vlan_filter, 3849 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3850 .reset_event = hclgevf_reset_event, 3851 .set_default_reset_request = hclgevf_set_def_reset_request, 3852 .set_channels = hclgevf_set_channels, 3853 .get_channels = hclgevf_get_channels, 3854 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3855 .get_regs_len = hclgevf_get_regs_len, 3856 .get_regs = hclgevf_get_regs, 3857 .get_status = hclgevf_get_status, 3858 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3859 .get_media_type = hclgevf_get_media_type, 3860 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 3861 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3862 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 3863 .set_gro_en = hclgevf_gro_en, 3864 .set_mtu = hclgevf_set_mtu, 3865 .get_global_queue_id = hclgevf_get_qid_global, 3866 .set_timer_task = hclgevf_set_timer_task, 3867 .get_link_mode = hclgevf_get_link_mode, 3868 .set_promisc_mode = hclgevf_set_promisc_mode,
3869 .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3870 .get_cmdq_stat = hclgevf_get_cmdq_stat, 3871 }; 3872 3873 static struct hnae3_ae_algo ae_algovf = { 3874 .ops = &hclgevf_ops, 3875 .pdev_id_table = ae_algovf_pci_tbl, 3876 }; 3877 3878 static int hclgevf_init(void) 3879 { 3880 pr_info("%s is initializing\n", HCLGEVF_NAME); 3881 3882 hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); 3883 if (!hclgevf_wq) { 3884 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3885 return -ENOMEM; 3886 } 3887 3888 hnae3_register_ae_algo(&ae_algovf); 3889 3890 return 0; 3891 } 3892 3893 static void hclgevf_exit(void) 3894 { 3895 hnae3_unregister_ae_algo(&ae_algovf); 3896 destroy_workqueue(hclgevf_wq); 3897 } 3898 module_init(hclgevf_init); 3899 module_exit(hclgevf_exit); 3900 3901 MODULE_LICENSE("GPL"); 3902 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3903 MODULE_DESCRIPTION("HCLGEVF Driver"); 3904 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3905
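/* Module bring-up/teardown sketch (reference only; mirrors the code above):
 *
 *   hclgevf_init()
 *     - alloc_workqueue("hclgevf", WQ_UNBOUND, 0): unbound workqueue shared
 *       by the reset, mailbox and periodic service tasks
 *     - hnae3_register_ae_algo(&ae_algovf): matches ae_algovf_pci_tbl, so
 *       the hnae3 framework calls hclgevf_init_ae_dev() for each bound VF
 *   hclgevf_exit()
 *     - hnae3_unregister_ae_algo(&ae_algovf), then destroy_workqueue()
 */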