1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 #include "hclgevf_devlink.h" 12 #include "hclge_comm_rss.h" 13 14 #define HCLGEVF_NAME "hclgevf" 15 16 #define HCLGEVF_RESET_MAX_FAIL_CNT 5 17 18 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 19 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 20 unsigned long delay); 21 22 static struct hnae3_ae_algo ae_algovf; 23 24 static struct workqueue_struct *hclgevf_wq; 25 26 static const struct pci_device_id ae_algovf_pci_tbl[] = { 27 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 28 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 29 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 30 /* required last entry */ 31 {0, } 32 }; 33 34 static const u8 hclgevf_hash_key[] = { 35 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 36 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 37 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 38 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 39 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 40 }; 41 42 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 43 44 static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 45 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 46 HCLGE_COMM_NIC_CSQ_DEPTH_REG, 47 HCLGE_COMM_NIC_CSQ_TAIL_REG, 48 HCLGE_COMM_NIC_CSQ_HEAD_REG, 49 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 50 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 51 HCLGE_COMM_NIC_CRQ_DEPTH_REG, 52 HCLGE_COMM_NIC_CRQ_TAIL_REG, 53 HCLGE_COMM_NIC_CRQ_HEAD_REG, 54 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, 55 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG, 56 HCLGE_COMM_CMDQ_INTR_EN_REG, 57 HCLGE_COMM_CMDQ_INTR_GEN_REG}; 58 59 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 60 HCLGEVF_RST_ING, 61 HCLGEVF_GRO_EN_REG}; 62 63 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 64 HCLGEVF_RING_RX_ADDR_H_REG, 65 HCLGEVF_RING_RX_BD_NUM_REG, 66 HCLGEVF_RING_RX_BD_LENGTH_REG, 67 HCLGEVF_RING_RX_MERGE_EN_REG, 68 HCLGEVF_RING_RX_TAIL_REG, 69 HCLGEVF_RING_RX_HEAD_REG, 70 HCLGEVF_RING_RX_FBD_NUM_REG, 71 HCLGEVF_RING_RX_OFFSET_REG, 72 HCLGEVF_RING_RX_FBD_OFFSET_REG, 73 HCLGEVF_RING_RX_STASH_REG, 74 HCLGEVF_RING_RX_BD_ERR_REG, 75 HCLGEVF_RING_TX_ADDR_L_REG, 76 HCLGEVF_RING_TX_ADDR_H_REG, 77 HCLGEVF_RING_TX_BD_NUM_REG, 78 HCLGEVF_RING_TX_PRIORITY_REG, 79 HCLGEVF_RING_TX_TC_REG, 80 HCLGEVF_RING_TX_MERGE_EN_REG, 81 HCLGEVF_RING_TX_TAIL_REG, 82 HCLGEVF_RING_TX_HEAD_REG, 83 HCLGEVF_RING_TX_FBD_NUM_REG, 84 HCLGEVF_RING_TX_OFFSET_REG, 85 HCLGEVF_RING_TX_EBD_NUM_REG, 86 HCLGEVF_RING_TX_EBD_OFFSET_REG, 87 HCLGEVF_RING_TX_BD_ERR_REG, 88 HCLGEVF_RING_EN_REG}; 89 90 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 91 HCLGEVF_TQP_INTR_GL0_REG, 92 HCLGEVF_TQP_INTR_GL1_REG, 93 HCLGEVF_TQP_INTR_GL2_REG, 94 HCLGEVF_TQP_INTR_RL_REG}; 95 96 /* hclgevf_cmd_send - send command to command queue 97 * @hw: pointer to the hw struct 98 * @desc: prefilled descriptor for describing the command 99 * @num : the number of descriptors to be sent 100 * 101 * This is the main send command for command queue, it 102 * sends the queue, cleans the queue, etc 103 */ 104 int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num) 105 { 106 return hclge_comm_cmd_send(&hw->hw, desc, num); 107 } 108 109 void hclgevf_arq_init(struct hclgevf_dev *hdev) 110 { 111 struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq; 112 113 spin_lock(&cmdq->crq.lock); 114 /* initialize the pointers of async rx queue of mailbox */ 115 hdev->arq.hdev = hdev; 116 hdev->arq.head = 0; 117 hdev->arq.tail = 0; 118 atomic_set(&hdev->arq.count, 0); 119 spin_unlock(&cmdq->crq.lock); 120 } 121 122 static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 123 { 124 if (!handle->client) 125 return container_of(handle, struct hclgevf_dev, nic); 126 else if (handle->client->type == HNAE3_CLIENT_ROCE) 127 return container_of(handle, struct hclgevf_dev, roce); 128 else 129 return container_of(handle, struct hclgevf_dev, nic); 130 } 131 132 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 133 { 134 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 135 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 136 struct hclge_desc desc; 137 struct hclgevf_tqp *tqp; 138 int status; 139 int i; 140 141 for (i = 0; i < kinfo->num_tqps; i++) { 142 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 143 hclgevf_cmd_setup_basic_desc(&desc, 144 HCLGEVF_OPC_QUERY_RX_STATUS, 145 true); 146 147 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 148 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 149 if (status) { 150 dev_err(&hdev->pdev->dev, 151 "Query tqp stat fail, status = %d,queue = %d\n", 152 status, i); 153 return status; 154 } 155 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 156 le32_to_cpu(desc.data[1]); 157 158 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 159 true); 160 161 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 162 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 163 if (status) { 164 dev_err(&hdev->pdev->dev, 165 "Query tqp stat fail, status = %d,queue = %d\n", 166 status, i); 167 return status; 168 } 169 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 170 le32_to_cpu(desc.data[1]); 171 } 172 173 return 0; 174 } 175 176 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 177 { 178 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 179 struct hclgevf_tqp *tqp; 180 u64 *buff = data; 181 int i; 182 183 for (i = 0; i < kinfo->num_tqps; i++) { 184 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 185 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 186 } 187 for (i = 0; i < kinfo->num_tqps; i++) { 188 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 189 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 190 } 191 192 return buff; 193 } 194 195 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 196 { 197 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 198 199 return kinfo->num_tqps * 2; 200 } 201 202 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 203 { 204 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 205 u8 *buff = data; 206 int i; 207 208 for (i = 0; i < kinfo->num_tqps; i++) { 209 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 210 struct hclgevf_tqp, q); 211 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", 212 tqp->index); 213 buff += ETH_GSTRING_LEN; 214 } 215 216 for (i = 0; i < kinfo->num_tqps; i++) { 217 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 218 struct hclgevf_tqp, q); 219 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", 220 tqp->index); 221 buff += ETH_GSTRING_LEN; 222 } 223 224 return buff; 225 } 226 227 static void hclgevf_update_stats(struct hnae3_handle *handle, 228 struct net_device_stats *net_stats) 229 { 230 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 231 int status; 232 233 status = hclgevf_tqps_update_stats(handle); 234 if (status) 235 dev_err(&hdev->pdev->dev, 236 "VF update of TQPS stats fail, status = %d.\n", 237 status); 238 } 239 240 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 241 { 242 if (strset == ETH_SS_TEST) 243 return -EOPNOTSUPP; 244 else if (strset == ETH_SS_STATS) 245 return hclgevf_tqps_get_sset_count(handle, strset); 246 247 return 0; 248 } 249 250 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 251 u8 *data) 252 { 253 u8 *p = (char *)data; 254 255 if (strset == ETH_SS_STATS) 256 p = hclgevf_tqps_get_strings(handle, p); 257 } 258 259 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 260 { 261 hclgevf_tqps_get_stats(handle, data); 262 } 263 264 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, 265 u8 subcode) 266 { 267 if (msg) { 268 memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); 269 msg->code = code; 270 msg->subcode = subcode; 271 } 272 } 273 274 static int hclgevf_get_basic_info(struct hclgevf_dev *hdev) 275 { 276 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 277 u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE]; 278 struct hclge_basic_info *basic_info; 279 struct hclge_vf_to_pf_msg send_msg; 280 unsigned long caps; 281 int status; 282 283 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0); 284 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 285 sizeof(resp_msg)); 286 if (status) { 287 dev_err(&hdev->pdev->dev, 288 "failed to get basic info from pf, ret = %d", status); 289 return status; 290 } 291 292 basic_info = (struct hclge_basic_info *)resp_msg; 293 294 hdev->hw_tc_map = basic_info->hw_tc_map; 295 hdev->mbx_api_version = basic_info->mbx_api_version; 296 caps = basic_info->pf_caps; 297 if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps)) 298 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); 299 300 return 0; 301 } 302 303 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 304 { 305 struct hnae3_handle *nic = &hdev->nic; 306 struct hclge_vf_to_pf_msg send_msg; 307 u8 resp_msg; 308 int ret; 309 310 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 311 HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); 312 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 313 sizeof(u8)); 314 if (ret) { 315 dev_err(&hdev->pdev->dev, 316 "VF request to get port based vlan state failed %d", 317 ret); 318 return ret; 319 } 320 321 nic->port_base_vlan_state = resp_msg; 322 323 return 0; 324 } 325 326 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 327 { 328 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 329 #define HCLGEVF_TQPS_ALLOC_OFFSET 0 330 #define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 331 #define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 332 333 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 334 struct hclge_vf_to_pf_msg send_msg; 335 int status; 336 337 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); 338 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 339 HCLGEVF_TQPS_RSS_INFO_LEN); 340 if (status) { 341 dev_err(&hdev->pdev->dev, 342 "VF request to get tqp info from PF failed %d", 343 status); 344 return status; 345 } 346 347 memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], 348 sizeof(u16)); 349 memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], 350 sizeof(u16)); 351 memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], 352 sizeof(u16)); 353 354 return 0; 355 } 356 357 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 358 { 359 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 360 #define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 361 #define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 362 363 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 364 struct hclge_vf_to_pf_msg send_msg; 365 int ret; 366 367 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); 368 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 369 HCLGEVF_TQPS_DEPTH_INFO_LEN); 370 if (ret) { 371 dev_err(&hdev->pdev->dev, 372 "VF request to get tqp depth info from PF failed %d", 373 ret); 374 return ret; 375 } 376 377 memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], 378 sizeof(u16)); 379 memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], 380 sizeof(u16)); 381 382 return 0; 383 } 384 385 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 386 { 387 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 388 struct hclge_vf_to_pf_msg send_msg; 389 u16 qid_in_pf = 0; 390 u8 resp_data[2]; 391 int ret; 392 393 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); 394 memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 395 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, 396 sizeof(resp_data)); 397 if (!ret) 398 qid_in_pf = *(u16 *)resp_data; 399 400 return qid_in_pf; 401 } 402 403 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 404 { 405 struct hclge_vf_to_pf_msg send_msg; 406 u8 resp_msg[2]; 407 int ret; 408 409 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); 410 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 411 sizeof(resp_msg)); 412 if (ret) { 413 dev_err(&hdev->pdev->dev, 414 "VF request to get the pf port media type failed %d", 415 ret); 416 return ret; 417 } 418 419 hdev->hw.mac.media_type = resp_msg[0]; 420 hdev->hw.mac.module_type = resp_msg[1]; 421 422 return 0; 423 } 424 425 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 426 { 427 struct hclgevf_tqp *tqp; 428 int i; 429 430 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 431 sizeof(struct hclgevf_tqp), GFP_KERNEL); 432 if (!hdev->htqp) 433 return -ENOMEM; 434 435 tqp = hdev->htqp; 436 437 for (i = 0; i < hdev->num_tqps; i++) { 438 tqp->dev = &hdev->pdev->dev; 439 tqp->index = i; 440 441 tqp->q.ae_algo = &ae_algovf; 442 tqp->q.buf_size = hdev->rx_buf_len; 443 tqp->q.tx_desc_num = hdev->num_tx_desc; 444 tqp->q.rx_desc_num = hdev->num_rx_desc; 445 446 /* need an extended offset to configure queues >= 447 * HCLGEVF_TQP_MAX_SIZE_DEV_V2. 448 */ 449 if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) 450 tqp->q.io_base = hdev->hw.hw.io_base + 451 HCLGEVF_TQP_REG_OFFSET + 452 i * HCLGEVF_TQP_REG_SIZE; 453 else 454 tqp->q.io_base = hdev->hw.hw.io_base + 455 HCLGEVF_TQP_REG_OFFSET + 456 HCLGEVF_TQP_EXT_REG_OFFSET + 457 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * 458 HCLGEVF_TQP_REG_SIZE; 459 460 tqp++; 461 } 462 463 return 0; 464 } 465 466 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 467 { 468 struct hnae3_handle *nic = &hdev->nic; 469 struct hnae3_knic_private_info *kinfo; 470 u16 new_tqps = hdev->num_tqps; 471 unsigned int i; 472 u8 num_tc = 0; 473 474 kinfo = &nic->kinfo; 475 kinfo->num_tx_desc = hdev->num_tx_desc; 476 kinfo->num_rx_desc = hdev->num_rx_desc; 477 kinfo->rx_buf_len = hdev->rx_buf_len; 478 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 479 if (hdev->hw_tc_map & BIT(i)) 480 num_tc++; 481 482 num_tc = num_tc ? num_tc : 1; 483 kinfo->tc_info.num_tc = num_tc; 484 kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); 485 new_tqps = kinfo->rss_size * num_tc; 486 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 487 488 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 489 sizeof(struct hnae3_queue *), GFP_KERNEL); 490 if (!kinfo->tqp) 491 return -ENOMEM; 492 493 for (i = 0; i < kinfo->num_tqps; i++) { 494 hdev->htqp[i].q.handle = &hdev->nic; 495 hdev->htqp[i].q.tqp_index = i; 496 kinfo->tqp[i] = &hdev->htqp[i].q; 497 } 498 499 /* after init the max rss_size and tqps, adjust the default tqp numbers 500 * and rss size with the actual vector numbers 501 */ 502 kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 503 kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, 504 kinfo->rss_size); 505 506 return 0; 507 } 508 509 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 510 { 511 struct hclge_vf_to_pf_msg send_msg; 512 int status; 513 514 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); 515 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 516 if (status) 517 dev_err(&hdev->pdev->dev, 518 "VF failed to fetch link status(%d) from PF", status); 519 } 520 521 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 522 { 523 struct hnae3_handle *rhandle = &hdev->roce; 524 struct hnae3_handle *handle = &hdev->nic; 525 struct hnae3_client *rclient; 526 struct hnae3_client *client; 527 528 if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 529 return; 530 531 client = handle->client; 532 rclient = hdev->roce_client; 533 534 link_state = 535 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 536 if (link_state != hdev->hw.mac.link) { 537 hdev->hw.mac.link = link_state; 538 client->ops->link_status_change(handle, !!link_state); 539 if (rclient && rclient->ops->link_status_change) 540 rclient->ops->link_status_change(rhandle, !!link_state); 541 } 542 543 clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 544 } 545 546 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 547 { 548 #define HCLGEVF_ADVERTISING 0 549 #define HCLGEVF_SUPPORTED 1 550 551 struct hclge_vf_to_pf_msg send_msg; 552 553 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 554 send_msg.data[0] = HCLGEVF_ADVERTISING; 555 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 556 send_msg.data[0] = HCLGEVF_SUPPORTED; 557 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 558 } 559 560 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 561 { 562 struct hnae3_handle *nic = &hdev->nic; 563 int ret; 564 565 nic->ae_algo = &ae_algovf; 566 nic->pdev = hdev->pdev; 567 nic->numa_node_mask = hdev->numa_node_mask; 568 nic->flags |= HNAE3_SUPPORT_VF; 569 nic->kinfo.io_base = hdev->hw.hw.io_base; 570 571 ret = hclgevf_knic_setup(hdev); 572 if (ret) 573 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 574 ret); 575 return ret; 576 } 577 578 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 579 { 580 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 581 dev_warn(&hdev->pdev->dev, 582 "vector(vector_id %d) has been freed.\n", vector_id); 583 return; 584 } 585 586 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 587 hdev->num_msi_left += 1; 588 hdev->num_msi_used -= 1; 589 } 590 591 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 592 struct hnae3_vector_info *vector_info) 593 { 594 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 595 struct hnae3_vector_info *vector = vector_info; 596 int alloc = 0; 597 int i, j; 598 599 vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 600 vector_num = min(hdev->num_msi_left, vector_num); 601 602 for (j = 0; j < vector_num; j++) { 603 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 604 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 605 vector->vector = pci_irq_vector(hdev->pdev, i); 606 vector->io_addr = hdev->hw.hw.io_base + 607 HCLGEVF_VECTOR_REG_BASE + 608 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 609 hdev->vector_status[i] = 0; 610 hdev->vector_irq[i] = vector->vector; 611 612 vector++; 613 alloc++; 614 615 break; 616 } 617 } 618 } 619 hdev->num_msi_left -= alloc; 620 hdev->num_msi_used += alloc; 621 622 return alloc; 623 } 624 625 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 626 { 627 int i; 628 629 for (i = 0; i < hdev->num_msi; i++) 630 if (vector == hdev->vector_irq[i]) 631 return i; 632 633 return -EINVAL; 634 } 635 636 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 637 { 638 struct hclgevf_rss_tc_mode_cmd *req; 639 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 640 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 641 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 642 struct hclge_desc desc; 643 u16 roundup_size; 644 unsigned int i; 645 int status; 646 647 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 648 649 roundup_size = roundup_pow_of_two(rss_size); 650 roundup_size = ilog2(roundup_size); 651 652 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 653 tc_valid[i] = 1; 654 tc_size[i] = roundup_size; 655 tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0; 656 } 657 658 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 659 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 660 u16 mode = 0; 661 662 hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B, 663 (tc_valid[i] & 0x1)); 664 hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M, 665 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 666 hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B, 667 tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET & 668 0x1); 669 hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M, 670 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 671 672 req->rss_tc_mode[i] = cpu_to_le16(mode); 673 } 674 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 675 if (status) 676 dev_err(&hdev->pdev->dev, 677 "VF failed(=%d) to set rss tc mode\n", status); 678 679 return status; 680 } 681 682 /* for revision 0x20, vf shared the same rss config with pf */ 683 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 684 { 685 #define HCLGEVF_RSS_MBX_RESP_LEN 8 686 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 687 u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 688 struct hclge_vf_to_pf_msg send_msg; 689 u16 msg_num, hash_key_index; 690 u8 index; 691 int ret; 692 693 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 694 msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 695 HCLGEVF_RSS_MBX_RESP_LEN; 696 for (index = 0; index < msg_num; index++) { 697 send_msg.data[0] = index; 698 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 699 HCLGEVF_RSS_MBX_RESP_LEN); 700 if (ret) { 701 dev_err(&hdev->pdev->dev, 702 "VF get rss hash key from PF failed, ret=%d", 703 ret); 704 return ret; 705 } 706 707 hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 708 if (index == msg_num - 1) 709 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 710 &resp_msg[0], 711 HCLGE_COMM_RSS_KEY_SIZE - hash_key_index); 712 else 713 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 714 &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 715 } 716 717 return 0; 718 } 719 720 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 721 u8 *hfunc) 722 { 723 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 724 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 725 int ret; 726 727 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 728 hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); 729 } else { 730 if (hfunc) 731 *hfunc = ETH_RSS_HASH_TOP; 732 if (key) { 733 ret = hclgevf_get_rss_hash_key(hdev); 734 if (ret) 735 return ret; 736 memcpy(key, rss_cfg->rss_hash_key, 737 HCLGE_COMM_RSS_KEY_SIZE); 738 } 739 } 740 741 hclge_comm_get_rss_indir_tbl(rss_cfg, indir, 742 hdev->ae_dev->dev_specs.rss_ind_tbl_size); 743 744 return 0; 745 } 746 747 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 748 const u8 *key, const u8 hfunc) 749 { 750 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 751 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 752 const u8 *rss_hash_key = rss_cfg->rss_hash_key; 753 u8 hash_algo; 754 int ret, i; 755 756 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 757 ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo); 758 if (ret) 759 return ret; 760 761 /* Set the RSS Hash Key if specififed by the user */ 762 if (key) { 763 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, 764 hash_algo, key); 765 if (ret) { 766 dev_err(&hdev->pdev->dev, 767 "invalid hfunc type %u\n", hfunc); 768 return ret; 769 } 770 771 /* Update the shadow RSS key with user specified qids */ 772 memcpy(rss_cfg->rss_hash_key, key, 773 HCLGE_COMM_RSS_KEY_SIZE); 774 } else { 775 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, 776 hash_algo, 777 rss_hash_key); 778 if (ret) 779 return ret; 780 } 781 rss_cfg->rss_algo = hash_algo; 782 } 783 784 /* update the shadow RSS table with user specified qids */ 785 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 786 rss_cfg->rss_indirection_tbl[i] = indir[i]; 787 788 /* update the hardware */ 789 return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 790 rss_cfg->rss_indirection_tbl); 791 } 792 793 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 794 struct ethtool_rxnfc *nfc) 795 { 796 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 797 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 798 struct hclge_comm_rss_input_tuple_cmd *req; 799 struct hclge_desc desc; 800 int ret; 801 802 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 803 return -EOPNOTSUPP; 804 805 if (nfc->data & 806 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 807 return -EINVAL; 808 809 req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data; 810 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 811 812 ret = hclge_comm_init_rss_tuple_cmd(rss_cfg, nfc, hdev->ae_dev, req); 813 if (ret) { 814 dev_err(&hdev->pdev->dev, 815 "failed to init rss tuple cmd, ret = %d\n", ret); 816 return ret; 817 } 818 819 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 820 if (ret) { 821 dev_err(&hdev->pdev->dev, 822 "Set rss tuple fail, status = %d\n", ret); 823 return ret; 824 } 825 826 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 827 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 828 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 829 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 830 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 831 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 832 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 833 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 834 return 0; 835 } 836 837 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 838 struct ethtool_rxnfc *nfc) 839 { 840 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 841 u8 tuple_sets; 842 int ret; 843 844 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 845 return -EOPNOTSUPP; 846 847 nfc->data = 0; 848 849 ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type, 850 &tuple_sets); 851 if (ret || !tuple_sets) 852 return ret; 853 854 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); 855 856 return 0; 857 } 858 859 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 860 { 861 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 862 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 863 864 return rss_cfg->rss_size; 865 } 866 867 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 868 int vector_id, 869 struct hnae3_ring_chain_node *ring_chain) 870 { 871 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 872 struct hclge_vf_to_pf_msg send_msg; 873 struct hnae3_ring_chain_node *node; 874 int status; 875 int i = 0; 876 877 memset(&send_msg, 0, sizeof(send_msg)); 878 send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 879 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 880 send_msg.vector_id = vector_id; 881 882 for (node = ring_chain; node; node = node->next) { 883 send_msg.param[i].ring_type = 884 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 885 886 send_msg.param[i].tqp_index = node->tqp_index; 887 send_msg.param[i].int_gl_index = 888 hnae3_get_field(node->int_gl_idx, 889 HNAE3_RING_GL_IDX_M, 890 HNAE3_RING_GL_IDX_S); 891 892 i++; 893 if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 894 send_msg.ring_num = i; 895 896 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 897 NULL, 0); 898 if (status) { 899 dev_err(&hdev->pdev->dev, 900 "Map TQP fail, status is %d.\n", 901 status); 902 return status; 903 } 904 i = 0; 905 } 906 } 907 908 return 0; 909 } 910 911 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 912 struct hnae3_ring_chain_node *ring_chain) 913 { 914 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 915 int vector_id; 916 917 vector_id = hclgevf_get_vector_index(hdev, vector); 918 if (vector_id < 0) { 919 dev_err(&handle->pdev->dev, 920 "Get vector index fail. ret =%d\n", vector_id); 921 return vector_id; 922 } 923 924 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 925 } 926 927 static int hclgevf_unmap_ring_from_vector( 928 struct hnae3_handle *handle, 929 int vector, 930 struct hnae3_ring_chain_node *ring_chain) 931 { 932 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 933 int ret, vector_id; 934 935 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 936 return 0; 937 938 vector_id = hclgevf_get_vector_index(hdev, vector); 939 if (vector_id < 0) { 940 dev_err(&handle->pdev->dev, 941 "Get vector index fail. ret =%d\n", vector_id); 942 return vector_id; 943 } 944 945 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 946 if (ret) 947 dev_err(&handle->pdev->dev, 948 "Unmap ring from vector fail. vector=%d, ret =%d\n", 949 vector_id, 950 ret); 951 952 return ret; 953 } 954 955 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 956 { 957 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 958 int vector_id; 959 960 vector_id = hclgevf_get_vector_index(hdev, vector); 961 if (vector_id < 0) { 962 dev_err(&handle->pdev->dev, 963 "hclgevf_put_vector get vector index fail. ret =%d\n", 964 vector_id); 965 return vector_id; 966 } 967 968 hclgevf_free_vector(hdev, vector_id); 969 970 return 0; 971 } 972 973 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 974 bool en_uc_pmc, bool en_mc_pmc, 975 bool en_bc_pmc) 976 { 977 struct hnae3_handle *handle = &hdev->nic; 978 struct hclge_vf_to_pf_msg send_msg; 979 int ret; 980 981 memset(&send_msg, 0, sizeof(send_msg)); 982 send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 983 send_msg.en_bc = en_bc_pmc ? 1 : 0; 984 send_msg.en_uc = en_uc_pmc ? 1 : 0; 985 send_msg.en_mc = en_mc_pmc ? 1 : 0; 986 send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC, 987 &handle->priv_flags) ? 1 : 0; 988 989 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 990 if (ret) 991 dev_err(&hdev->pdev->dev, 992 "Set promisc mode fail, status is %d.\n", ret); 993 994 return ret; 995 } 996 997 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 998 bool en_mc_pmc) 999 { 1000 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1001 bool en_bc_pmc; 1002 1003 en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; 1004 1005 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 1006 en_bc_pmc); 1007 } 1008 1009 static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 1010 { 1011 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1012 1013 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1014 hclgevf_task_schedule(hdev, 0); 1015 } 1016 1017 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 1018 { 1019 struct hnae3_handle *handle = &hdev->nic; 1020 bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 1021 bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 1022 int ret; 1023 1024 if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 1025 ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 1026 if (!ret) 1027 clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1028 } 1029 } 1030 1031 static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, 1032 u16 stream_id, bool enable) 1033 { 1034 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1035 struct hclge_desc desc; 1036 1037 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1038 1039 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1040 false); 1041 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1042 req->stream_id = cpu_to_le16(stream_id); 1043 if (enable) 1044 req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 1045 1046 return hclgevf_cmd_send(&hdev->hw, &desc, 1); 1047 } 1048 1049 static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable) 1050 { 1051 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1052 int ret; 1053 u16 i; 1054 1055 for (i = 0; i < handle->kinfo.num_tqps; i++) { 1056 ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable); 1057 if (ret) 1058 return ret; 1059 } 1060 1061 return 0; 1062 } 1063 1064 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1065 { 1066 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1067 struct hclgevf_tqp *tqp; 1068 int i; 1069 1070 for (i = 0; i < kinfo->num_tqps; i++) { 1071 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1072 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1073 } 1074 } 1075 1076 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 1077 { 1078 struct hclge_vf_to_pf_msg send_msg; 1079 u8 host_mac[ETH_ALEN]; 1080 int status; 1081 1082 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 1083 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 1084 ETH_ALEN); 1085 if (status) { 1086 dev_err(&hdev->pdev->dev, 1087 "fail to get VF MAC from host %d", status); 1088 return status; 1089 } 1090 1091 ether_addr_copy(p, host_mac); 1092 1093 return 0; 1094 } 1095 1096 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1097 { 1098 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1099 u8 host_mac_addr[ETH_ALEN]; 1100 1101 if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 1102 return; 1103 1104 hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 1105 if (hdev->has_pf_mac) 1106 ether_addr_copy(p, host_mac_addr); 1107 else 1108 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1109 } 1110 1111 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p, 1112 bool is_first) 1113 { 1114 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1115 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1116 struct hclge_vf_to_pf_msg send_msg; 1117 u8 *new_mac_addr = (u8 *)p; 1118 int status; 1119 1120 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 1121 send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1122 ether_addr_copy(send_msg.data, new_mac_addr); 1123 if (is_first && !hdev->has_pf_mac) 1124 eth_zero_addr(&send_msg.data[ETH_ALEN]); 1125 else 1126 ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 1127 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1128 if (!status) 1129 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1130 1131 return status; 1132 } 1133 1134 static struct hclgevf_mac_addr_node * 1135 hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 1136 { 1137 struct hclgevf_mac_addr_node *mac_node, *tmp; 1138 1139 list_for_each_entry_safe(mac_node, tmp, list, node) 1140 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 1141 return mac_node; 1142 1143 return NULL; 1144 } 1145 1146 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 1147 enum HCLGEVF_MAC_NODE_STATE state) 1148 { 1149 switch (state) { 1150 /* from set_rx_mode or tmp_add_list */ 1151 case HCLGEVF_MAC_TO_ADD: 1152 if (mac_node->state == HCLGEVF_MAC_TO_DEL) 1153 mac_node->state = HCLGEVF_MAC_ACTIVE; 1154 break; 1155 /* only from set_rx_mode */ 1156 case HCLGEVF_MAC_TO_DEL: 1157 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1158 list_del(&mac_node->node); 1159 kfree(mac_node); 1160 } else { 1161 mac_node->state = HCLGEVF_MAC_TO_DEL; 1162 } 1163 break; 1164 /* only from tmp_add_list, the mac_node->state won't be 1165 * HCLGEVF_MAC_ACTIVE 1166 */ 1167 case HCLGEVF_MAC_ACTIVE: 1168 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1169 mac_node->state = HCLGEVF_MAC_ACTIVE; 1170 break; 1171 } 1172 } 1173 1174 static int hclgevf_update_mac_list(struct hnae3_handle *handle, 1175 enum HCLGEVF_MAC_NODE_STATE state, 1176 enum HCLGEVF_MAC_ADDR_TYPE mac_type, 1177 const unsigned char *addr) 1178 { 1179 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1180 struct hclgevf_mac_addr_node *mac_node; 1181 struct list_head *list; 1182 1183 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1184 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1185 1186 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1187 1188 /* if the mac addr is already in the mac list, no need to add a new 1189 * one into it, just check the mac addr state, convert it to a new 1190 * new state, or just remove it, or do nothing. 1191 */ 1192 mac_node = hclgevf_find_mac_node(list, addr); 1193 if (mac_node) { 1194 hclgevf_update_mac_node(mac_node, state); 1195 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1196 return 0; 1197 } 1198 /* if this address is never added, unnecessary to delete */ 1199 if (state == HCLGEVF_MAC_TO_DEL) { 1200 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1201 return -ENOENT; 1202 } 1203 1204 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 1205 if (!mac_node) { 1206 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1207 return -ENOMEM; 1208 } 1209 1210 mac_node->state = state; 1211 ether_addr_copy(mac_node->mac_addr, addr); 1212 list_add_tail(&mac_node->node, list); 1213 1214 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1215 return 0; 1216 } 1217 1218 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1219 const unsigned char *addr) 1220 { 1221 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1222 HCLGEVF_MAC_ADDR_UC, addr); 1223 } 1224 1225 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1226 const unsigned char *addr) 1227 { 1228 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1229 HCLGEVF_MAC_ADDR_UC, addr); 1230 } 1231 1232 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1233 const unsigned char *addr) 1234 { 1235 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1236 HCLGEVF_MAC_ADDR_MC, addr); 1237 } 1238 1239 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1240 const unsigned char *addr) 1241 { 1242 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1243 HCLGEVF_MAC_ADDR_MC, addr); 1244 } 1245 1246 static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, 1247 struct hclgevf_mac_addr_node *mac_node, 1248 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1249 { 1250 struct hclge_vf_to_pf_msg send_msg; 1251 u8 code, subcode; 1252 1253 if (mac_type == HCLGEVF_MAC_ADDR_UC) { 1254 code = HCLGE_MBX_SET_UNICAST; 1255 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1256 subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; 1257 else 1258 subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; 1259 } else { 1260 code = HCLGE_MBX_SET_MULTICAST; 1261 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1262 subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; 1263 else 1264 subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; 1265 } 1266 1267 hclgevf_build_send_msg(&send_msg, code, subcode); 1268 ether_addr_copy(send_msg.data, mac_node->mac_addr); 1269 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1270 } 1271 1272 static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, 1273 struct list_head *list, 1274 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1275 { 1276 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 1277 struct hclgevf_mac_addr_node *mac_node, *tmp; 1278 int ret; 1279 1280 list_for_each_entry_safe(mac_node, tmp, list, node) { 1281 ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); 1282 if (ret) { 1283 hnae3_format_mac_addr(format_mac_addr, 1284 mac_node->mac_addr); 1285 dev_err(&hdev->pdev->dev, 1286 "failed to configure mac %s, state = %d, ret = %d\n", 1287 format_mac_addr, mac_node->state, ret); 1288 return; 1289 } 1290 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1291 mac_node->state = HCLGEVF_MAC_ACTIVE; 1292 } else { 1293 list_del(&mac_node->node); 1294 kfree(mac_node); 1295 } 1296 } 1297 } 1298 1299 static void hclgevf_sync_from_add_list(struct list_head *add_list, 1300 struct list_head *mac_list) 1301 { 1302 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1303 1304 list_for_each_entry_safe(mac_node, tmp, add_list, node) { 1305 /* if the mac address from tmp_add_list is not in the 1306 * uc/mc_mac_list, it means have received a TO_DEL request 1307 * during the time window of sending mac config request to PF 1308 * If mac_node state is ACTIVE, then change its state to TO_DEL, 1309 * then it will be removed at next time. If is TO_ADD, it means 1310 * send TO_ADD request failed, so just remove the mac node. 1311 */ 1312 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1313 if (new_node) { 1314 hclgevf_update_mac_node(new_node, mac_node->state); 1315 list_del(&mac_node->node); 1316 kfree(mac_node); 1317 } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { 1318 mac_node->state = HCLGEVF_MAC_TO_DEL; 1319 list_move_tail(&mac_node->node, mac_list); 1320 } else { 1321 list_del(&mac_node->node); 1322 kfree(mac_node); 1323 } 1324 } 1325 } 1326 1327 static void hclgevf_sync_from_del_list(struct list_head *del_list, 1328 struct list_head *mac_list) 1329 { 1330 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1331 1332 list_for_each_entry_safe(mac_node, tmp, del_list, node) { 1333 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1334 if (new_node) { 1335 /* If the mac addr is exist in the mac list, it means 1336 * received a new request TO_ADD during the time window 1337 * of sending mac addr configurrequest to PF, so just 1338 * change the mac state to ACTIVE. 1339 */ 1340 new_node->state = HCLGEVF_MAC_ACTIVE; 1341 list_del(&mac_node->node); 1342 kfree(mac_node); 1343 } else { 1344 list_move_tail(&mac_node->node, mac_list); 1345 } 1346 } 1347 } 1348 1349 static void hclgevf_clear_list(struct list_head *list) 1350 { 1351 struct hclgevf_mac_addr_node *mac_node, *tmp; 1352 1353 list_for_each_entry_safe(mac_node, tmp, list, node) { 1354 list_del(&mac_node->node); 1355 kfree(mac_node); 1356 } 1357 } 1358 1359 static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, 1360 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1361 { 1362 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1363 struct list_head tmp_add_list, tmp_del_list; 1364 struct list_head *list; 1365 1366 INIT_LIST_HEAD(&tmp_add_list); 1367 INIT_LIST_HEAD(&tmp_del_list); 1368 1369 /* move the mac addr to the tmp_add_list and tmp_del_list, then 1370 * we can add/delete these mac addr outside the spin lock 1371 */ 1372 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1373 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1374 1375 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1376 1377 list_for_each_entry_safe(mac_node, tmp, list, node) { 1378 switch (mac_node->state) { 1379 case HCLGEVF_MAC_TO_DEL: 1380 list_move_tail(&mac_node->node, &tmp_del_list); 1381 break; 1382 case HCLGEVF_MAC_TO_ADD: 1383 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 1384 if (!new_node) 1385 goto stop_traverse; 1386 1387 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 1388 new_node->state = mac_node->state; 1389 list_add_tail(&new_node->node, &tmp_add_list); 1390 break; 1391 default: 1392 break; 1393 } 1394 } 1395 1396 stop_traverse: 1397 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1398 1399 /* delete first, in order to get max mac table space for adding */ 1400 hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); 1401 hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); 1402 1403 /* if some mac addresses were added/deleted fail, move back to the 1404 * mac_list, and retry at next time. 1405 */ 1406 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1407 1408 hclgevf_sync_from_del_list(&tmp_del_list, list); 1409 hclgevf_sync_from_add_list(&tmp_add_list, list); 1410 1411 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1412 } 1413 1414 static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) 1415 { 1416 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); 1417 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); 1418 } 1419 1420 static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) 1421 { 1422 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1423 1424 hclgevf_clear_list(&hdev->mac_table.uc_mac_list); 1425 hclgevf_clear_list(&hdev->mac_table.mc_mac_list); 1426 1427 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1428 } 1429 1430 static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 1431 { 1432 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1433 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 1434 struct hclge_vf_to_pf_msg send_msg; 1435 1436 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) 1437 return -EOPNOTSUPP; 1438 1439 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1440 HCLGE_MBX_ENABLE_VLAN_FILTER); 1441 send_msg.data[0] = enable ? 1 : 0; 1442 1443 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1444 } 1445 1446 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1447 __be16 proto, u16 vlan_id, 1448 bool is_kill) 1449 { 1450 #define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 1451 #define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 1452 #define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 1453 1454 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1455 struct hclge_vf_to_pf_msg send_msg; 1456 int ret; 1457 1458 if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1459 return -EINVAL; 1460 1461 if (proto != htons(ETH_P_8021Q)) 1462 return -EPROTONOSUPPORT; 1463 1464 /* When device is resetting or reset failed, firmware is unable to 1465 * handle mailbox. Just record the vlan id, and remove it after 1466 * reset finished. 1467 */ 1468 if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 1469 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { 1470 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1471 return -EBUSY; 1472 } 1473 1474 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1475 HCLGE_MBX_VLAN_FILTER); 1476 send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; 1477 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, 1478 sizeof(vlan_id)); 1479 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, 1480 sizeof(proto)); 1481 /* when remove hw vlan filter failed, record the vlan id, 1482 * and try to remove it from hw later, to be consistence 1483 * with stack. 1484 */ 1485 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1486 if (is_kill && ret) 1487 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1488 1489 return ret; 1490 } 1491 1492 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1493 { 1494 #define HCLGEVF_MAX_SYNC_COUNT 60 1495 struct hnae3_handle *handle = &hdev->nic; 1496 int ret, sync_cnt = 0; 1497 u16 vlan_id; 1498 1499 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1500 while (vlan_id != VLAN_N_VID) { 1501 ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1502 vlan_id, true); 1503 if (ret) 1504 return; 1505 1506 clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1507 sync_cnt++; 1508 if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1509 return; 1510 1511 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1512 } 1513 } 1514 1515 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1516 { 1517 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1518 struct hclge_vf_to_pf_msg send_msg; 1519 1520 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1521 HCLGE_MBX_VLAN_RX_OFF_CFG); 1522 send_msg.data[0] = enable ? 1 : 0; 1523 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1524 } 1525 1526 static int hclgevf_reset_tqp(struct hnae3_handle *handle) 1527 { 1528 #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U 1529 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1530 struct hclge_vf_to_pf_msg send_msg; 1531 u8 return_status = 0; 1532 int ret; 1533 u16 i; 1534 1535 /* disable vf queue before send queue reset msg to PF */ 1536 ret = hclgevf_tqp_enable(handle, false); 1537 if (ret) { 1538 dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n", 1539 ret); 1540 return ret; 1541 } 1542 1543 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1544 1545 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status, 1546 sizeof(return_status)); 1547 if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE) 1548 return ret; 1549 1550 for (i = 1; i < handle->kinfo.num_tqps; i++) { 1551 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1552 memcpy(send_msg.data, &i, sizeof(i)); 1553 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1554 if (ret) 1555 return ret; 1556 } 1557 1558 return 0; 1559 } 1560 1561 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1562 { 1563 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1564 struct hclge_vf_to_pf_msg send_msg; 1565 1566 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); 1567 memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); 1568 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1569 } 1570 1571 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1572 enum hnae3_reset_notify_type type) 1573 { 1574 struct hnae3_client *client = hdev->nic_client; 1575 struct hnae3_handle *handle = &hdev->nic; 1576 int ret; 1577 1578 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 1579 !client) 1580 return 0; 1581 1582 if (!client->ops->reset_notify) 1583 return -EOPNOTSUPP; 1584 1585 ret = client->ops->reset_notify(handle, type); 1586 if (ret) 1587 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1588 type, ret); 1589 1590 return ret; 1591 } 1592 1593 static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, 1594 enum hnae3_reset_notify_type type) 1595 { 1596 struct hnae3_client *client = hdev->roce_client; 1597 struct hnae3_handle *handle = &hdev->roce; 1598 int ret; 1599 1600 if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) 1601 return 0; 1602 1603 if (!client->ops->reset_notify) 1604 return -EOPNOTSUPP; 1605 1606 ret = client->ops->reset_notify(handle, type); 1607 if (ret) 1608 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 1609 type, ret); 1610 return ret; 1611 } 1612 1613 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1614 { 1615 #define HCLGEVF_RESET_WAIT_US 20000 1616 #define HCLGEVF_RESET_WAIT_CNT 2000 1617 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1618 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1619 1620 u32 val; 1621 int ret; 1622 1623 if (hdev->reset_type == HNAE3_VF_RESET) 1624 ret = readl_poll_timeout(hdev->hw.hw.io_base + 1625 HCLGEVF_VF_RST_ING, val, 1626 !(val & HCLGEVF_VF_RST_ING_BIT), 1627 HCLGEVF_RESET_WAIT_US, 1628 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1629 else 1630 ret = readl_poll_timeout(hdev->hw.hw.io_base + 1631 HCLGEVF_RST_ING, val, 1632 !(val & HCLGEVF_RST_ING_BITS), 1633 HCLGEVF_RESET_WAIT_US, 1634 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1635 1636 /* hardware completion status should be available by this time */ 1637 if (ret) { 1638 dev_err(&hdev->pdev->dev, 1639 "couldn't get reset done status from h/w, timeout!\n"); 1640 return ret; 1641 } 1642 1643 /* we will wait a bit more to let reset of the stack to complete. This 1644 * might happen in case reset assertion was made by PF. Yes, this also 1645 * means we might end up waiting bit more even for VF reset. 1646 */ 1647 msleep(5000); 1648 1649 return 0; 1650 } 1651 1652 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 1653 { 1654 u32 reg_val; 1655 1656 reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); 1657 if (enable) 1658 reg_val |= HCLGEVF_NIC_SW_RST_RDY; 1659 else 1660 reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 1661 1662 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 1663 reg_val); 1664 } 1665 1666 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1667 { 1668 int ret; 1669 1670 /* uninitialize the nic client */ 1671 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1672 if (ret) 1673 return ret; 1674 1675 /* re-initialize the hclge device */ 1676 ret = hclgevf_reset_hdev(hdev); 1677 if (ret) { 1678 dev_err(&hdev->pdev->dev, 1679 "hclge device re-init failed, VF is disabled!\n"); 1680 return ret; 1681 } 1682 1683 /* bring up the nic client again */ 1684 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1685 if (ret) 1686 return ret; 1687 1688 /* clear handshake status with IMP */ 1689 hclgevf_reset_handshake(hdev, false); 1690 1691 /* bring up the nic to enable TX/RX again */ 1692 return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1693 } 1694 1695 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1696 { 1697 #define HCLGEVF_RESET_SYNC_TIME 100 1698 1699 if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1700 struct hclge_vf_to_pf_msg send_msg; 1701 int ret; 1702 1703 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); 1704 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1705 if (ret) { 1706 dev_err(&hdev->pdev->dev, 1707 "failed to assert VF reset, ret = %d\n", ret); 1708 return ret; 1709 } 1710 hdev->rst_stats.vf_func_rst_cnt++; 1711 } 1712 1713 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 1714 /* inform hardware that preparatory work is done */ 1715 msleep(HCLGEVF_RESET_SYNC_TIME); 1716 hclgevf_reset_handshake(hdev, true); 1717 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", 1718 hdev->reset_type); 1719 1720 return 0; 1721 } 1722 1723 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 1724 { 1725 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 1726 hdev->rst_stats.vf_func_rst_cnt); 1727 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1728 hdev->rst_stats.flr_rst_cnt); 1729 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 1730 hdev->rst_stats.vf_rst_cnt); 1731 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1732 hdev->rst_stats.rst_done_cnt); 1733 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1734 hdev->rst_stats.hw_rst_done_cnt); 1735 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1736 hdev->rst_stats.rst_cnt); 1737 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1738 hdev->rst_stats.rst_fail_cnt); 1739 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1740 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 1741 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 1742 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG)); 1743 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1744 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG)); 1745 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1746 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 1747 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1748 } 1749 1750 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1751 { 1752 /* recover handshake status with IMP when reset fail */ 1753 hclgevf_reset_handshake(hdev, true); 1754 hdev->rst_stats.rst_fail_cnt++; 1755 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1756 hdev->rst_stats.rst_fail_cnt); 1757 1758 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1759 set_bit(hdev->reset_type, &hdev->reset_pending); 1760 1761 if (hclgevf_is_reset_pending(hdev)) { 1762 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1763 hclgevf_reset_task_schedule(hdev); 1764 } else { 1765 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1766 hclgevf_dump_rst_info(hdev); 1767 } 1768 } 1769 1770 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 1771 { 1772 int ret; 1773 1774 hdev->rst_stats.rst_cnt++; 1775 1776 /* perform reset of the stack & ae device for a client */ 1777 ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 1778 if (ret) 1779 return ret; 1780 1781 rtnl_lock(); 1782 /* bring down the nic to stop any ongoing TX/RX */ 1783 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1784 rtnl_unlock(); 1785 if (ret) 1786 return ret; 1787 1788 return hclgevf_reset_prepare_wait(hdev); 1789 } 1790 1791 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 1792 { 1793 int ret; 1794 1795 hdev->rst_stats.hw_rst_done_cnt++; 1796 ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 1797 if (ret) 1798 return ret; 1799 1800 rtnl_lock(); 1801 /* now, re-initialize the nic client and ae device */ 1802 ret = hclgevf_reset_stack(hdev); 1803 rtnl_unlock(); 1804 if (ret) { 1805 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1806 return ret; 1807 } 1808 1809 ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 1810 /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1 1811 * times 1812 */ 1813 if (ret && 1814 hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) 1815 return ret; 1816 1817 ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); 1818 if (ret) 1819 return ret; 1820 1821 hdev->last_reset_time = jiffies; 1822 hdev->rst_stats.rst_done_cnt++; 1823 hdev->rst_stats.rst_fail_cnt = 0; 1824 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1825 1826 return 0; 1827 } 1828 1829 static void hclgevf_reset(struct hclgevf_dev *hdev) 1830 { 1831 if (hclgevf_reset_prepare(hdev)) 1832 goto err_reset; 1833 1834 /* check if VF could successfully fetch the hardware reset completion 1835 * status from the hardware 1836 */ 1837 if (hclgevf_reset_wait(hdev)) { 1838 /* can't do much in this situation, will disable VF */ 1839 dev_err(&hdev->pdev->dev, 1840 "failed to fetch H/W reset completion status\n"); 1841 goto err_reset; 1842 } 1843 1844 if (hclgevf_reset_rebuild(hdev)) 1845 goto err_reset; 1846 1847 return; 1848 1849 err_reset: 1850 hclgevf_reset_err_handle(hdev); 1851 } 1852 1853 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1854 unsigned long *addr) 1855 { 1856 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1857 1858 /* return the highest priority reset level amongst all */ 1859 if (test_bit(HNAE3_VF_RESET, addr)) { 1860 rst_level = HNAE3_VF_RESET; 1861 clear_bit(HNAE3_VF_RESET, addr); 1862 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1863 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1864 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1865 rst_level = HNAE3_VF_FULL_RESET; 1866 clear_bit(HNAE3_VF_FULL_RESET, addr); 1867 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1868 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1869 rst_level = HNAE3_VF_PF_FUNC_RESET; 1870 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1871 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1872 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1873 rst_level = HNAE3_VF_FUNC_RESET; 1874 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1875 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 1876 rst_level = HNAE3_FLR_RESET; 1877 clear_bit(HNAE3_FLR_RESET, addr); 1878 } 1879 1880 return rst_level; 1881 } 1882 1883 static void hclgevf_reset_event(struct pci_dev *pdev, 1884 struct hnae3_handle *handle) 1885 { 1886 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1887 struct hclgevf_dev *hdev = ae_dev->priv; 1888 1889 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 1890 1891 if (hdev->default_reset_request) 1892 hdev->reset_level = 1893 hclgevf_get_reset_level(hdev, 1894 &hdev->default_reset_request); 1895 else 1896 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1897 1898 /* reset of this VF requested */ 1899 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1900 hclgevf_reset_task_schedule(hdev); 1901 1902 hdev->last_reset_time = jiffies; 1903 } 1904 1905 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1906 enum hnae3_reset_type rst_type) 1907 { 1908 struct hclgevf_dev *hdev = ae_dev->priv; 1909 1910 set_bit(rst_type, &hdev->default_reset_request); 1911 } 1912 1913 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1914 { 1915 writel(en ? 1 : 0, vector->addr); 1916 } 1917 1918 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 1919 enum hnae3_reset_type rst_type) 1920 { 1921 #define HCLGEVF_RESET_RETRY_WAIT_MS 500 1922 #define HCLGEVF_RESET_RETRY_CNT 5 1923 1924 struct hclgevf_dev *hdev = ae_dev->priv; 1925 int retry_cnt = 0; 1926 int ret; 1927 1928 while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { 1929 down(&hdev->reset_sem); 1930 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1931 hdev->reset_type = rst_type; 1932 ret = hclgevf_reset_prepare(hdev); 1933 if (!ret && !hdev->reset_pending) 1934 break; 1935 1936 dev_err(&hdev->pdev->dev, 1937 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", 1938 ret, hdev->reset_pending, retry_cnt); 1939 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1940 up(&hdev->reset_sem); 1941 msleep(HCLGEVF_RESET_RETRY_WAIT_MS); 1942 } 1943 1944 /* disable misc vector before reset done */ 1945 hclgevf_enable_vector(&hdev->misc_vector, false); 1946 1947 if (hdev->reset_type == HNAE3_FLR_RESET) 1948 hdev->rst_stats.flr_rst_cnt++; 1949 } 1950 1951 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) 1952 { 1953 struct hclgevf_dev *hdev = ae_dev->priv; 1954 int ret; 1955 1956 hclgevf_enable_vector(&hdev->misc_vector, true); 1957 1958 ret = hclgevf_reset_rebuild(hdev); 1959 if (ret) 1960 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 1961 ret); 1962 1963 hdev->reset_type = HNAE3_NONE_RESET; 1964 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1965 up(&hdev->reset_sem); 1966 } 1967 1968 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1969 { 1970 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1971 1972 return hdev->fw_version; 1973 } 1974 1975 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1976 { 1977 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1978 1979 vector->vector_irq = pci_irq_vector(hdev->pdev, 1980 HCLGEVF_MISC_VECTOR_NUM); 1981 vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1982 /* vector status always valid for Vector 0 */ 1983 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1984 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1985 1986 hdev->num_msi_left -= 1; 1987 hdev->num_msi_used += 1; 1988 } 1989 1990 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1991 { 1992 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 1993 test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && 1994 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 1995 &hdev->state)) 1996 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 1997 } 1998 1999 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 2000 { 2001 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2002 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 2003 &hdev->state)) 2004 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2005 } 2006 2007 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 2008 unsigned long delay) 2009 { 2010 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2011 !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2012 mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); 2013 } 2014 2015 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) 2016 { 2017 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 2018 2019 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) 2020 return; 2021 2022 down(&hdev->reset_sem); 2023 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2024 2025 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 2026 &hdev->reset_state)) { 2027 /* PF has intimated that it is about to reset the hardware. 2028 * We now have to poll & check if hardware has actually 2029 * completed the reset sequence. On hardware reset completion, 2030 * VF needs to reset the client and ae device. 2031 */ 2032 hdev->reset_attempts = 0; 2033 2034 hdev->last_reset_time = jiffies; 2035 hdev->reset_type = 2036 hclgevf_get_reset_level(hdev, &hdev->reset_pending); 2037 if (hdev->reset_type != HNAE3_NONE_RESET) 2038 hclgevf_reset(hdev); 2039 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 2040 &hdev->reset_state)) { 2041 /* we could be here when either of below happens: 2042 * 1. reset was initiated due to watchdog timeout caused by 2043 * a. IMP was earlier reset and our TX got choked down and 2044 * which resulted in watchdog reacting and inducing VF 2045 * reset. This also means our cmdq would be unreliable. 2046 * b. problem in TX due to other lower layer(example link 2047 * layer not functioning properly etc.) 2048 * 2. VF reset might have been initiated due to some config 2049 * change. 2050 * 2051 * NOTE: Theres no clear way to detect above cases than to react 2052 * to the response of PF for this reset request. PF will ack the 2053 * 1b and 2. cases but we will not get any intimation about 1a 2054 * from PF as cmdq would be in unreliable state i.e. mailbox 2055 * communication between PF and VF would be broken. 2056 * 2057 * if we are never geting into pending state it means either: 2058 * 1. PF is not receiving our request which could be due to IMP 2059 * reset 2060 * 2. PF is screwed 2061 * We cannot do much for 2. but to check first we can try reset 2062 * our PCIe + stack and see if it alleviates the problem. 2063 */ 2064 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 2065 /* prepare for full reset of stack + pcie interface */ 2066 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 2067 2068 /* "defer" schedule the reset task again */ 2069 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2070 } else { 2071 hdev->reset_attempts++; 2072 2073 set_bit(hdev->reset_level, &hdev->reset_pending); 2074 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2075 } 2076 hclgevf_reset_task_schedule(hdev); 2077 } 2078 2079 hdev->reset_type = HNAE3_NONE_RESET; 2080 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2081 up(&hdev->reset_sem); 2082 } 2083 2084 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 2085 { 2086 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2087 return; 2088 2089 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 2090 return; 2091 2092 hclgevf_mbx_async_handler(hdev); 2093 2094 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2095 } 2096 2097 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 2098 { 2099 struct hclge_vf_to_pf_msg send_msg; 2100 int ret; 2101 2102 if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) 2103 return; 2104 2105 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 2106 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2107 if (ret) 2108 dev_err(&hdev->pdev->dev, 2109 "VF sends keep alive cmd failed(=%d)\n", ret); 2110 } 2111 2112 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 2113 { 2114 unsigned long delta = round_jiffies_relative(HZ); 2115 struct hnae3_handle *handle = &hdev->nic; 2116 2117 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2118 return; 2119 2120 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 2121 delta = jiffies - hdev->last_serv_processed; 2122 2123 if (delta < round_jiffies_relative(HZ)) { 2124 delta = round_jiffies_relative(HZ) - delta; 2125 goto out; 2126 } 2127 } 2128 2129 hdev->serv_processed_cnt++; 2130 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 2131 hclgevf_keep_alive(hdev); 2132 2133 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 2134 hdev->last_serv_processed = jiffies; 2135 goto out; 2136 } 2137 2138 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 2139 hclgevf_tqps_update_stats(handle); 2140 2141 /* VF does not need to request link status when this bit is set, because 2142 * PF will push its link status to VFs when link status changed. 2143 */ 2144 if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) 2145 hclgevf_request_link_info(hdev); 2146 2147 hclgevf_update_link_mode(hdev); 2148 2149 hclgevf_sync_vlan_filter(hdev); 2150 2151 hclgevf_sync_mac_table(hdev); 2152 2153 hclgevf_sync_promisc_mode(hdev); 2154 2155 hdev->last_serv_processed = jiffies; 2156 2157 out: 2158 hclgevf_task_schedule(hdev, delta); 2159 } 2160 2161 static void hclgevf_service_task(struct work_struct *work) 2162 { 2163 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 2164 service_task.work); 2165 2166 hclgevf_reset_service_task(hdev); 2167 hclgevf_mailbox_service_task(hdev); 2168 hclgevf_periodic_service_task(hdev); 2169 2170 /* Handle reset and mbx again in case periodical task delays the 2171 * handling by calling hclgevf_task_schedule() in 2172 * hclgevf_periodic_service_task() 2173 */ 2174 hclgevf_reset_service_task(hdev); 2175 hclgevf_mailbox_service_task(hdev); 2176 } 2177 2178 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 2179 { 2180 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr); 2181 } 2182 2183 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 2184 u32 *clearval) 2185 { 2186 u32 val, cmdq_stat_reg, rst_ing_reg; 2187 2188 /* fetch the events from their corresponding regs */ 2189 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 2190 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG); 2191 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 2192 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2193 dev_info(&hdev->pdev->dev, 2194 "receive reset interrupt 0x%x!\n", rst_ing_reg); 2195 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 2196 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2197 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 2198 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 2199 hdev->rst_stats.vf_rst_cnt++; 2200 /* set up VF hardware reset status, its PF will clear 2201 * this status when PF has initialized done. 2202 */ 2203 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 2204 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 2205 val | HCLGEVF_VF_RST_ING_BIT); 2206 return HCLGEVF_VECTOR0_EVENT_RST; 2207 } 2208 2209 /* check for vector0 mailbox(=CMDQ RX) event source */ 2210 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 2211 /* for revision 0x21, clearing interrupt is writing bit 0 2212 * to the clear register, writing bit 1 means to keep the 2213 * old value. 2214 * for revision 0x20, the clear register is a read & write 2215 * register, so we should just write 0 to the bit we are 2216 * handling, and keep other bits as cmdq_stat_reg. 2217 */ 2218 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2219 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2220 else 2221 *clearval = cmdq_stat_reg & 2222 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2223 2224 return HCLGEVF_VECTOR0_EVENT_MBX; 2225 } 2226 2227 /* print other vector0 event source */ 2228 dev_info(&hdev->pdev->dev, 2229 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2230 cmdq_stat_reg); 2231 2232 return HCLGEVF_VECTOR0_EVENT_OTHER; 2233 } 2234 2235 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2236 { 2237 enum hclgevf_evt_cause event_cause; 2238 struct hclgevf_dev *hdev = data; 2239 u32 clearval; 2240 2241 hclgevf_enable_vector(&hdev->misc_vector, false); 2242 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2243 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2244 hclgevf_clear_event_cause(hdev, clearval); 2245 2246 switch (event_cause) { 2247 case HCLGEVF_VECTOR0_EVENT_RST: 2248 hclgevf_reset_task_schedule(hdev); 2249 break; 2250 case HCLGEVF_VECTOR0_EVENT_MBX: 2251 hclgevf_mbx_handler(hdev); 2252 break; 2253 default: 2254 break; 2255 } 2256 2257 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2258 hclgevf_enable_vector(&hdev->misc_vector, true); 2259 2260 return IRQ_HANDLED; 2261 } 2262 2263 static int hclgevf_configure(struct hclgevf_dev *hdev) 2264 { 2265 int ret; 2266 2267 hdev->gro_en = true; 2268 2269 ret = hclgevf_get_basic_info(hdev); 2270 if (ret) 2271 return ret; 2272 2273 /* get current port based vlan state from PF */ 2274 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2275 if (ret) 2276 return ret; 2277 2278 /* get queue configuration from PF */ 2279 ret = hclgevf_get_queue_info(hdev); 2280 if (ret) 2281 return ret; 2282 2283 /* get queue depth info from PF */ 2284 ret = hclgevf_get_queue_depth(hdev); 2285 if (ret) 2286 return ret; 2287 2288 return hclgevf_get_pf_media_type(hdev); 2289 } 2290 2291 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2292 { 2293 struct pci_dev *pdev = ae_dev->pdev; 2294 struct hclgevf_dev *hdev; 2295 2296 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2297 if (!hdev) 2298 return -ENOMEM; 2299 2300 hdev->pdev = pdev; 2301 hdev->ae_dev = ae_dev; 2302 ae_dev->priv = hdev; 2303 2304 return 0; 2305 } 2306 2307 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2308 { 2309 struct hnae3_handle *roce = &hdev->roce; 2310 struct hnae3_handle *nic = &hdev->nic; 2311 2312 roce->rinfo.num_vectors = hdev->num_roce_msix; 2313 2314 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2315 hdev->num_msi_left == 0) 2316 return -EINVAL; 2317 2318 roce->rinfo.base_vector = hdev->roce_base_msix_offset; 2319 2320 roce->rinfo.netdev = nic->kinfo.netdev; 2321 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; 2322 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; 2323 2324 roce->pdev = nic->pdev; 2325 roce->ae_algo = nic->ae_algo; 2326 roce->numa_node_mask = nic->numa_node_mask; 2327 2328 return 0; 2329 } 2330 2331 static int hclgevf_config_gro(struct hclgevf_dev *hdev) 2332 { 2333 struct hclgevf_cfg_gro_status_cmd *req; 2334 struct hclge_desc desc; 2335 int ret; 2336 2337 if (!hnae3_dev_gro_supported(hdev)) 2338 return 0; 2339 2340 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2341 false); 2342 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2343 2344 req->gro_en = hdev->gro_en ? 1 : 0; 2345 2346 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2347 if (ret) 2348 dev_err(&hdev->pdev->dev, 2349 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2350 2351 return ret; 2352 } 2353 2354 static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) 2355 { 2356 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size; 2357 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 2358 struct hclge_comm_rss_tuple_cfg *tuple_sets; 2359 u32 i; 2360 2361 rss_cfg->rss_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ; 2362 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2363 tuple_sets = &rss_cfg->rss_tuple_sets; 2364 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2365 u16 *rss_ind_tbl; 2366 2367 rss_cfg->rss_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE; 2368 2369 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size, 2370 sizeof(*rss_ind_tbl), GFP_KERNEL); 2371 if (!rss_ind_tbl) 2372 return -ENOMEM; 2373 2374 rss_cfg->rss_indirection_tbl = rss_ind_tbl; 2375 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2376 HCLGE_COMM_RSS_KEY_SIZE); 2377 2378 tuple_sets->ipv4_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; 2379 tuple_sets->ipv4_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; 2380 tuple_sets->ipv4_sctp_en = HCLGE_COMM_RSS_INPUT_TUPLE_SCTP; 2381 tuple_sets->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; 2382 tuple_sets->ipv6_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; 2383 tuple_sets->ipv6_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; 2384 tuple_sets->ipv6_sctp_en = 2385 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 2386 HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT : 2387 HCLGE_COMM_RSS_INPUT_TUPLE_SCTP; 2388 tuple_sets->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; 2389 } 2390 2391 /* Initialize RSS indirect table */ 2392 for (i = 0; i < rss_ind_tbl_size; i++) 2393 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2394 2395 return 0; 2396 } 2397 2398 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2399 { 2400 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 2401 int ret; 2402 2403 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2404 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, 2405 rss_cfg->rss_algo, 2406 rss_cfg->rss_hash_key); 2407 if (ret) 2408 return ret; 2409 2410 ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw, 2411 false, rss_cfg); 2412 if (ret) 2413 return ret; 2414 } 2415 2416 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 2417 rss_cfg->rss_indirection_tbl); 2418 if (ret) 2419 return ret; 2420 2421 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2422 } 2423 2424 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2425 { 2426 struct hnae3_handle *nic = &hdev->nic; 2427 int ret; 2428 2429 ret = hclgevf_en_hw_strip_rxvtag(nic, true); 2430 if (ret) { 2431 dev_err(&hdev->pdev->dev, 2432 "failed to enable rx vlan offload, ret = %d\n", ret); 2433 return ret; 2434 } 2435 2436 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2437 false); 2438 } 2439 2440 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2441 { 2442 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2443 2444 unsigned long last = hdev->serv_processed_cnt; 2445 int i = 0; 2446 2447 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2448 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2449 last == hdev->serv_processed_cnt) 2450 usleep_range(1, 1); 2451 } 2452 2453 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2454 { 2455 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2456 2457 if (enable) { 2458 hclgevf_task_schedule(hdev, 0); 2459 } else { 2460 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2461 2462 /* flush memory to make sure DOWN is seen by service task */ 2463 smp_mb__before_atomic(); 2464 hclgevf_flush_link_update(hdev); 2465 } 2466 } 2467 2468 static int hclgevf_ae_start(struct hnae3_handle *handle) 2469 { 2470 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2471 2472 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2473 clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); 2474 2475 hclgevf_reset_tqp_stats(handle); 2476 2477 hclgevf_request_link_info(hdev); 2478 2479 hclgevf_update_link_mode(hdev); 2480 2481 return 0; 2482 } 2483 2484 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2485 { 2486 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2487 2488 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2489 2490 if (hdev->reset_type != HNAE3_VF_RESET) 2491 hclgevf_reset_tqp(handle); 2492 2493 hclgevf_reset_tqp_stats(handle); 2494 hclgevf_update_link_status(hdev, 0); 2495 } 2496 2497 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2498 { 2499 #define HCLGEVF_STATE_ALIVE 1 2500 #define HCLGEVF_STATE_NOT_ALIVE 0 2501 2502 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2503 struct hclge_vf_to_pf_msg send_msg; 2504 2505 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2506 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2507 HCLGEVF_STATE_NOT_ALIVE; 2508 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2509 } 2510 2511 static int hclgevf_client_start(struct hnae3_handle *handle) 2512 { 2513 return hclgevf_set_alive(handle, true); 2514 } 2515 2516 static void hclgevf_client_stop(struct hnae3_handle *handle) 2517 { 2518 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2519 int ret; 2520 2521 ret = hclgevf_set_alive(handle, false); 2522 if (ret) 2523 dev_warn(&hdev->pdev->dev, 2524 "%s failed %d\n", __func__, ret); 2525 } 2526 2527 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2528 { 2529 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2530 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2531 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2532 2533 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2534 2535 mutex_init(&hdev->mbx_resp.mbx_mutex); 2536 sema_init(&hdev->reset_sem, 1); 2537 2538 spin_lock_init(&hdev->mac_table.mac_list_lock); 2539 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2540 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2541 2542 /* bring the device down */ 2543 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2544 } 2545 2546 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2547 { 2548 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2549 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2550 2551 if (hdev->service_task.work.func) 2552 cancel_delayed_work_sync(&hdev->service_task); 2553 2554 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2555 } 2556 2557 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2558 { 2559 struct pci_dev *pdev = hdev->pdev; 2560 int vectors; 2561 int i; 2562 2563 if (hnae3_dev_roce_supported(hdev)) 2564 vectors = pci_alloc_irq_vectors(pdev, 2565 hdev->roce_base_msix_offset + 1, 2566 hdev->num_msi, 2567 PCI_IRQ_MSIX); 2568 else 2569 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2570 hdev->num_msi, 2571 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2572 2573 if (vectors < 0) { 2574 dev_err(&pdev->dev, 2575 "failed(%d) to allocate MSI/MSI-X vectors\n", 2576 vectors); 2577 return vectors; 2578 } 2579 if (vectors < hdev->num_msi) 2580 dev_warn(&hdev->pdev->dev, 2581 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2582 hdev->num_msi, vectors); 2583 2584 hdev->num_msi = vectors; 2585 hdev->num_msi_left = vectors; 2586 2587 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2588 sizeof(u16), GFP_KERNEL); 2589 if (!hdev->vector_status) { 2590 pci_free_irq_vectors(pdev); 2591 return -ENOMEM; 2592 } 2593 2594 for (i = 0; i < hdev->num_msi; i++) 2595 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2596 2597 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2598 sizeof(int), GFP_KERNEL); 2599 if (!hdev->vector_irq) { 2600 devm_kfree(&pdev->dev, hdev->vector_status); 2601 pci_free_irq_vectors(pdev); 2602 return -ENOMEM; 2603 } 2604 2605 return 0; 2606 } 2607 2608 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2609 { 2610 struct pci_dev *pdev = hdev->pdev; 2611 2612 devm_kfree(&pdev->dev, hdev->vector_status); 2613 devm_kfree(&pdev->dev, hdev->vector_irq); 2614 pci_free_irq_vectors(pdev); 2615 } 2616 2617 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2618 { 2619 int ret; 2620 2621 hclgevf_get_misc_vector(hdev); 2622 2623 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2624 HCLGEVF_NAME, pci_name(hdev->pdev)); 2625 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2626 0, hdev->misc_vector.name, hdev); 2627 if (ret) { 2628 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2629 hdev->misc_vector.vector_irq); 2630 return ret; 2631 } 2632 2633 hclgevf_clear_event_cause(hdev, 0); 2634 2635 /* enable misc. vector(vector 0) */ 2636 hclgevf_enable_vector(&hdev->misc_vector, true); 2637 2638 return ret; 2639 } 2640 2641 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2642 { 2643 /* disable misc vector(vector 0) */ 2644 hclgevf_enable_vector(&hdev->misc_vector, false); 2645 synchronize_irq(hdev->misc_vector.vector_irq); 2646 free_irq(hdev->misc_vector.vector_irq, hdev); 2647 hclgevf_free_vector(hdev, 0); 2648 } 2649 2650 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2651 { 2652 struct device *dev = &hdev->pdev->dev; 2653 2654 dev_info(dev, "VF info begin:\n"); 2655 2656 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2657 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2658 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2659 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2660 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2661 dev_info(dev, "PF media type of this VF: %u\n", 2662 hdev->hw.mac.media_type); 2663 2664 dev_info(dev, "VF info end.\n"); 2665 } 2666 2667 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2668 struct hnae3_client *client) 2669 { 2670 struct hclgevf_dev *hdev = ae_dev->priv; 2671 int rst_cnt = hdev->rst_stats.rst_cnt; 2672 int ret; 2673 2674 ret = client->ops->init_instance(&hdev->nic); 2675 if (ret) 2676 return ret; 2677 2678 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2679 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2680 rst_cnt != hdev->rst_stats.rst_cnt) { 2681 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2682 2683 client->ops->uninit_instance(&hdev->nic, 0); 2684 return -EBUSY; 2685 } 2686 2687 hnae3_set_client_init_flag(client, ae_dev, 1); 2688 2689 if (netif_msg_drv(&hdev->nic)) 2690 hclgevf_info_show(hdev); 2691 2692 return 0; 2693 } 2694 2695 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2696 struct hnae3_client *client) 2697 { 2698 struct hclgevf_dev *hdev = ae_dev->priv; 2699 int ret; 2700 2701 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2702 !hdev->nic_client) 2703 return 0; 2704 2705 ret = hclgevf_init_roce_base_info(hdev); 2706 if (ret) 2707 return ret; 2708 2709 ret = client->ops->init_instance(&hdev->roce); 2710 if (ret) 2711 return ret; 2712 2713 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2714 hnae3_set_client_init_flag(client, ae_dev, 1); 2715 2716 return 0; 2717 } 2718 2719 static int hclgevf_init_client_instance(struct hnae3_client *client, 2720 struct hnae3_ae_dev *ae_dev) 2721 { 2722 struct hclgevf_dev *hdev = ae_dev->priv; 2723 int ret; 2724 2725 switch (client->type) { 2726 case HNAE3_CLIENT_KNIC: 2727 hdev->nic_client = client; 2728 hdev->nic.client = client; 2729 2730 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2731 if (ret) 2732 goto clear_nic; 2733 2734 ret = hclgevf_init_roce_client_instance(ae_dev, 2735 hdev->roce_client); 2736 if (ret) 2737 goto clear_roce; 2738 2739 break; 2740 case HNAE3_CLIENT_ROCE: 2741 if (hnae3_dev_roce_supported(hdev)) { 2742 hdev->roce_client = client; 2743 hdev->roce.client = client; 2744 } 2745 2746 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2747 if (ret) 2748 goto clear_roce; 2749 2750 break; 2751 default: 2752 return -EINVAL; 2753 } 2754 2755 return 0; 2756 2757 clear_nic: 2758 hdev->nic_client = NULL; 2759 hdev->nic.client = NULL; 2760 return ret; 2761 clear_roce: 2762 hdev->roce_client = NULL; 2763 hdev->roce.client = NULL; 2764 return ret; 2765 } 2766 2767 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2768 struct hnae3_ae_dev *ae_dev) 2769 { 2770 struct hclgevf_dev *hdev = ae_dev->priv; 2771 2772 /* un-init roce, if it exists */ 2773 if (hdev->roce_client) { 2774 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2775 msleep(HCLGEVF_WAIT_RESET_DONE); 2776 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2777 2778 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2779 hdev->roce_client = NULL; 2780 hdev->roce.client = NULL; 2781 } 2782 2783 /* un-init nic/unic, if this was not called by roce client */ 2784 if (client->ops->uninit_instance && hdev->nic_client && 2785 client->type != HNAE3_CLIENT_ROCE) { 2786 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2787 msleep(HCLGEVF_WAIT_RESET_DONE); 2788 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2789 2790 client->ops->uninit_instance(&hdev->nic, 0); 2791 hdev->nic_client = NULL; 2792 hdev->nic.client = NULL; 2793 } 2794 } 2795 2796 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) 2797 { 2798 #define HCLGEVF_MEM_BAR 4 2799 2800 struct pci_dev *pdev = hdev->pdev; 2801 struct hclgevf_hw *hw = &hdev->hw; 2802 2803 /* for device does not have device memory, return directly */ 2804 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) 2805 return 0; 2806 2807 hw->hw.mem_base = 2808 devm_ioremap_wc(&pdev->dev, 2809 pci_resource_start(pdev, HCLGEVF_MEM_BAR), 2810 pci_resource_len(pdev, HCLGEVF_MEM_BAR)); 2811 if (!hw->hw.mem_base) { 2812 dev_err(&pdev->dev, "failed to map device memory\n"); 2813 return -EFAULT; 2814 } 2815 2816 return 0; 2817 } 2818 2819 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2820 { 2821 struct pci_dev *pdev = hdev->pdev; 2822 struct hclgevf_hw *hw; 2823 int ret; 2824 2825 ret = pci_enable_device(pdev); 2826 if (ret) { 2827 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2828 return ret; 2829 } 2830 2831 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2832 if (ret) { 2833 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2834 goto err_disable_device; 2835 } 2836 2837 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2838 if (ret) { 2839 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2840 goto err_disable_device; 2841 } 2842 2843 pci_set_master(pdev); 2844 hw = &hdev->hw; 2845 hw->hw.io_base = pci_iomap(pdev, 2, 0); 2846 if (!hw->hw.io_base) { 2847 dev_err(&pdev->dev, "can't map configuration register space\n"); 2848 ret = -ENOMEM; 2849 goto err_clr_master; 2850 } 2851 2852 ret = hclgevf_dev_mem_map(hdev); 2853 if (ret) 2854 goto err_unmap_io_base; 2855 2856 return 0; 2857 2858 err_unmap_io_base: 2859 pci_iounmap(pdev, hdev->hw.hw.io_base); 2860 err_clr_master: 2861 pci_clear_master(pdev); 2862 pci_release_regions(pdev); 2863 err_disable_device: 2864 pci_disable_device(pdev); 2865 2866 return ret; 2867 } 2868 2869 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2870 { 2871 struct pci_dev *pdev = hdev->pdev; 2872 2873 if (hdev->hw.hw.mem_base) 2874 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); 2875 2876 pci_iounmap(pdev, hdev->hw.hw.io_base); 2877 pci_clear_master(pdev); 2878 pci_release_regions(pdev); 2879 pci_disable_device(pdev); 2880 } 2881 2882 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2883 { 2884 struct hclgevf_query_res_cmd *req; 2885 struct hclge_desc desc; 2886 int ret; 2887 2888 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 2889 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2890 if (ret) { 2891 dev_err(&hdev->pdev->dev, 2892 "query vf resource failed, ret = %d.\n", ret); 2893 return ret; 2894 } 2895 2896 req = (struct hclgevf_query_res_cmd *)desc.data; 2897 2898 if (hnae3_dev_roce_supported(hdev)) { 2899 hdev->roce_base_msix_offset = 2900 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 2901 HCLGEVF_MSIX_OFT_ROCEE_M, 2902 HCLGEVF_MSIX_OFT_ROCEE_S); 2903 hdev->num_roce_msix = 2904 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 2905 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2906 2907 /* nic's msix numbers is always equals to the roce's. */ 2908 hdev->num_nic_msix = hdev->num_roce_msix; 2909 2910 /* VF should have NIC vectors and Roce vectors, NIC vectors 2911 * are queued before Roce vectors. The offset is fixed to 64. 2912 */ 2913 hdev->num_msi = hdev->num_roce_msix + 2914 hdev->roce_base_msix_offset; 2915 } else { 2916 hdev->num_msi = 2917 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 2918 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2919 2920 hdev->num_nic_msix = hdev->num_msi; 2921 } 2922 2923 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 2924 dev_err(&hdev->pdev->dev, 2925 "Just %u msi resources, not enough for vf(min:2).\n", 2926 hdev->num_nic_msix); 2927 return -EINVAL; 2928 } 2929 2930 return 0; 2931 } 2932 2933 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 2934 { 2935 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 2936 2937 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2938 2939 ae_dev->dev_specs.max_non_tso_bd_num = 2940 HCLGEVF_MAX_NON_TSO_BD_NUM; 2941 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 2942 ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; 2943 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 2944 ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; 2945 } 2946 2947 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 2948 struct hclge_desc *desc) 2949 { 2950 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2951 struct hclgevf_dev_specs_0_cmd *req0; 2952 struct hclgevf_dev_specs_1_cmd *req1; 2953 2954 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 2955 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 2956 2957 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 2958 ae_dev->dev_specs.rss_ind_tbl_size = 2959 le16_to_cpu(req0->rss_ind_tbl_size); 2960 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 2961 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 2962 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 2963 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 2964 } 2965 2966 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 2967 { 2968 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 2969 2970 if (!dev_specs->max_non_tso_bd_num) 2971 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 2972 if (!dev_specs->rss_ind_tbl_size) 2973 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 2974 if (!dev_specs->rss_key_size) 2975 dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; 2976 if (!dev_specs->max_int_gl) 2977 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 2978 if (!dev_specs->max_frm_size) 2979 dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; 2980 } 2981 2982 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 2983 { 2984 struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 2985 int ret; 2986 int i; 2987 2988 /* set default specifications as devices lower than version V3 do not 2989 * support querying specifications from firmware. 2990 */ 2991 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 2992 hclgevf_set_default_dev_specs(hdev); 2993 return 0; 2994 } 2995 2996 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 2997 hclgevf_cmd_setup_basic_desc(&desc[i], 2998 HCLGEVF_OPC_QUERY_DEV_SPECS, true); 2999 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3000 } 3001 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, 3002 true); 3003 3004 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 3005 if (ret) 3006 return ret; 3007 3008 hclgevf_parse_dev_specs(hdev, desc); 3009 hclgevf_check_dev_specs(hdev); 3010 3011 return 0; 3012 } 3013 3014 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 3015 { 3016 struct pci_dev *pdev = hdev->pdev; 3017 int ret = 0; 3018 3019 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 3020 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3021 hclgevf_misc_irq_uninit(hdev); 3022 hclgevf_uninit_msi(hdev); 3023 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3024 } 3025 3026 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3027 pci_set_master(pdev); 3028 ret = hclgevf_init_msi(hdev); 3029 if (ret) { 3030 dev_err(&pdev->dev, 3031 "failed(%d) to init MSI/MSI-X\n", ret); 3032 return ret; 3033 } 3034 3035 ret = hclgevf_misc_irq_init(hdev); 3036 if (ret) { 3037 hclgevf_uninit_msi(hdev); 3038 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 3039 ret); 3040 return ret; 3041 } 3042 3043 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3044 } 3045 3046 return ret; 3047 } 3048 3049 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 3050 { 3051 struct hclge_vf_to_pf_msg send_msg; 3052 3053 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 3054 HCLGE_MBX_VPORT_LIST_CLEAR); 3055 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3056 } 3057 3058 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) 3059 { 3060 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 3061 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); 3062 } 3063 3064 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) 3065 { 3066 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 3067 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); 3068 } 3069 3070 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 3071 { 3072 struct pci_dev *pdev = hdev->pdev; 3073 int ret; 3074 3075 ret = hclgevf_pci_reset(hdev); 3076 if (ret) { 3077 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 3078 return ret; 3079 } 3080 3081 hclgevf_arq_init(hdev); 3082 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 3083 &hdev->fw_version, false, 3084 hdev->reset_pending); 3085 if (ret) { 3086 dev_err(&pdev->dev, "cmd failed %d\n", ret); 3087 return ret; 3088 } 3089 3090 ret = hclgevf_rss_init_hw(hdev); 3091 if (ret) { 3092 dev_err(&hdev->pdev->dev, 3093 "failed(%d) to initialize RSS\n", ret); 3094 return ret; 3095 } 3096 3097 ret = hclgevf_config_gro(hdev); 3098 if (ret) 3099 return ret; 3100 3101 ret = hclgevf_init_vlan_config(hdev); 3102 if (ret) { 3103 dev_err(&hdev->pdev->dev, 3104 "failed(%d) to initialize VLAN config\n", ret); 3105 return ret; 3106 } 3107 3108 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3109 3110 hclgevf_init_rxd_adv_layout(hdev); 3111 3112 dev_info(&hdev->pdev->dev, "Reset done\n"); 3113 3114 return 0; 3115 } 3116 3117 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 3118 { 3119 struct pci_dev *pdev = hdev->pdev; 3120 int ret; 3121 3122 ret = hclgevf_pci_init(hdev); 3123 if (ret) 3124 return ret; 3125 3126 ret = hclgevf_devlink_init(hdev); 3127 if (ret) 3128 goto err_devlink_init; 3129 3130 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); 3131 if (ret) 3132 goto err_cmd_queue_init; 3133 3134 hclgevf_arq_init(hdev); 3135 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 3136 &hdev->fw_version, false, 3137 hdev->reset_pending); 3138 if (ret) 3139 goto err_cmd_init; 3140 3141 /* Get vf resource */ 3142 ret = hclgevf_query_vf_resource(hdev); 3143 if (ret) 3144 goto err_cmd_init; 3145 3146 ret = hclgevf_query_dev_specs(hdev); 3147 if (ret) { 3148 dev_err(&pdev->dev, 3149 "failed to query dev specifications, ret = %d\n", ret); 3150 goto err_cmd_init; 3151 } 3152 3153 ret = hclgevf_init_msi(hdev); 3154 if (ret) { 3155 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 3156 goto err_cmd_init; 3157 } 3158 3159 hclgevf_state_init(hdev); 3160 hdev->reset_level = HNAE3_VF_FUNC_RESET; 3161 hdev->reset_type = HNAE3_NONE_RESET; 3162 3163 ret = hclgevf_misc_irq_init(hdev); 3164 if (ret) 3165 goto err_misc_irq_init; 3166 3167 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3168 3169 ret = hclgevf_configure(hdev); 3170 if (ret) { 3171 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 3172 goto err_config; 3173 } 3174 3175 ret = hclgevf_alloc_tqps(hdev); 3176 if (ret) { 3177 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 3178 goto err_config; 3179 } 3180 3181 ret = hclgevf_set_handle_info(hdev); 3182 if (ret) 3183 goto err_config; 3184 3185 ret = hclgevf_config_gro(hdev); 3186 if (ret) 3187 goto err_config; 3188 3189 /* Initialize RSS for this VF */ 3190 ret = hclgevf_rss_init_cfg(hdev); 3191 if (ret) { 3192 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 3193 goto err_config; 3194 } 3195 3196 ret = hclgevf_rss_init_hw(hdev); 3197 if (ret) { 3198 dev_err(&hdev->pdev->dev, 3199 "failed(%d) to initialize RSS\n", ret); 3200 goto err_config; 3201 } 3202 3203 /* ensure vf tbl list as empty before init*/ 3204 ret = hclgevf_clear_vport_list(hdev); 3205 if (ret) { 3206 dev_err(&pdev->dev, 3207 "failed to clear tbl list configuration, ret = %d.\n", 3208 ret); 3209 goto err_config; 3210 } 3211 3212 ret = hclgevf_init_vlan_config(hdev); 3213 if (ret) { 3214 dev_err(&hdev->pdev->dev, 3215 "failed(%d) to initialize VLAN config\n", ret); 3216 goto err_config; 3217 } 3218 3219 hclgevf_init_rxd_adv_layout(hdev); 3220 3221 set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); 3222 3223 hdev->last_reset_time = jiffies; 3224 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 3225 HCLGEVF_DRIVER_NAME); 3226 3227 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 3228 3229 return 0; 3230 3231 err_config: 3232 hclgevf_misc_irq_uninit(hdev); 3233 err_misc_irq_init: 3234 hclgevf_state_uninit(hdev); 3235 hclgevf_uninit_msi(hdev); 3236 err_cmd_init: 3237 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 3238 err_cmd_queue_init: 3239 hclgevf_devlink_uninit(hdev); 3240 err_devlink_init: 3241 hclgevf_pci_uninit(hdev); 3242 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3243 return ret; 3244 } 3245 3246 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 3247 { 3248 struct hclge_vf_to_pf_msg send_msg; 3249 3250 hclgevf_state_uninit(hdev); 3251 hclgevf_uninit_rxd_adv_layout(hdev); 3252 3253 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3254 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3255 3256 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3257 hclgevf_misc_irq_uninit(hdev); 3258 hclgevf_uninit_msi(hdev); 3259 } 3260 3261 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 3262 hclgevf_devlink_uninit(hdev); 3263 hclgevf_pci_uninit(hdev); 3264 hclgevf_uninit_mac_list(hdev); 3265 } 3266 3267 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 3268 { 3269 struct pci_dev *pdev = ae_dev->pdev; 3270 int ret; 3271 3272 ret = hclgevf_alloc_hdev(ae_dev); 3273 if (ret) { 3274 dev_err(&pdev->dev, "hclge device allocation failed\n"); 3275 return ret; 3276 } 3277 3278 ret = hclgevf_init_hdev(ae_dev->priv); 3279 if (ret) { 3280 dev_err(&pdev->dev, "hclge device initialization failed\n"); 3281 return ret; 3282 } 3283 3284 return 0; 3285 } 3286 3287 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3288 { 3289 struct hclgevf_dev *hdev = ae_dev->priv; 3290 3291 hclgevf_uninit_hdev(hdev); 3292 ae_dev->priv = NULL; 3293 } 3294 3295 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3296 { 3297 struct hnae3_handle *nic = &hdev->nic; 3298 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3299 3300 return min_t(u32, hdev->rss_size_max, 3301 hdev->num_tqps / kinfo->tc_info.num_tc); 3302 } 3303 3304 /** 3305 * hclgevf_get_channels - Get the current channels enabled and max supported. 3306 * @handle: hardware information for network interface 3307 * @ch: ethtool channels structure 3308 * 3309 * We don't support separate tx and rx queues as channels. The other count 3310 * represents how many queues are being used for control. max_combined counts 3311 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3312 * q_vectors since we support a lot more queue pairs than q_vectors. 3313 **/ 3314 static void hclgevf_get_channels(struct hnae3_handle *handle, 3315 struct ethtool_channels *ch) 3316 { 3317 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3318 3319 ch->max_combined = hclgevf_get_max_channels(hdev); 3320 ch->other_count = 0; 3321 ch->max_other = 0; 3322 ch->combined_count = handle->kinfo.rss_size; 3323 } 3324 3325 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3326 u16 *alloc_tqps, u16 *max_rss_size) 3327 { 3328 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3329 3330 *alloc_tqps = hdev->num_tqps; 3331 *max_rss_size = hdev->rss_size_max; 3332 } 3333 3334 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3335 u32 new_tqps_num) 3336 { 3337 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3338 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3339 u16 max_rss_size; 3340 3341 kinfo->req_rss_size = new_tqps_num; 3342 3343 max_rss_size = min_t(u16, hdev->rss_size_max, 3344 hdev->num_tqps / kinfo->tc_info.num_tc); 3345 3346 /* Use the user's configuration when it is not larger than 3347 * max_rss_size, otherwise, use the maximum specification value. 3348 */ 3349 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 3350 kinfo->req_rss_size <= max_rss_size) 3351 kinfo->rss_size = kinfo->req_rss_size; 3352 else if (kinfo->rss_size > max_rss_size || 3353 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3354 kinfo->rss_size = max_rss_size; 3355 3356 kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 3357 } 3358 3359 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 3360 bool rxfh_configured) 3361 { 3362 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3363 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3364 u16 cur_rss_size = kinfo->rss_size; 3365 u16 cur_tqps = kinfo->num_tqps; 3366 u32 *rss_indir; 3367 unsigned int i; 3368 int ret; 3369 3370 hclgevf_update_rss_size(handle, new_tqps_num); 3371 3372 ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 3373 if (ret) 3374 return ret; 3375 3376 /* RSS indirection table has been configured by user */ 3377 if (rxfh_configured) 3378 goto out; 3379 3380 /* Reinitializes the rss indirect table according to the new RSS size */ 3381 rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 3382 sizeof(u32), GFP_KERNEL); 3383 if (!rss_indir) 3384 return -ENOMEM; 3385 3386 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 3387 rss_indir[i] = i % kinfo->rss_size; 3388 3389 hdev->rss_cfg.rss_size = kinfo->rss_size; 3390 3391 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 3392 if (ret) 3393 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 3394 ret); 3395 3396 kfree(rss_indir); 3397 3398 out: 3399 if (!ret) 3400 dev_info(&hdev->pdev->dev, 3401 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 3402 cur_rss_size, kinfo->rss_size, 3403 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 3404 3405 return ret; 3406 } 3407 3408 static int hclgevf_get_status(struct hnae3_handle *handle) 3409 { 3410 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3411 3412 return hdev->hw.mac.link; 3413 } 3414 3415 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3416 u8 *auto_neg, u32 *speed, 3417 u8 *duplex) 3418 { 3419 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3420 3421 if (speed) 3422 *speed = hdev->hw.mac.speed; 3423 if (duplex) 3424 *duplex = hdev->hw.mac.duplex; 3425 if (auto_neg) 3426 *auto_neg = AUTONEG_DISABLE; 3427 } 3428 3429 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3430 u8 duplex) 3431 { 3432 hdev->hw.mac.speed = speed; 3433 hdev->hw.mac.duplex = duplex; 3434 } 3435 3436 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3437 { 3438 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3439 bool gro_en_old = hdev->gro_en; 3440 int ret; 3441 3442 hdev->gro_en = enable; 3443 ret = hclgevf_config_gro(hdev); 3444 if (ret) 3445 hdev->gro_en = gro_en_old; 3446 3447 return ret; 3448 } 3449 3450 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3451 u8 *module_type) 3452 { 3453 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3454 3455 if (media_type) 3456 *media_type = hdev->hw.mac.media_type; 3457 3458 if (module_type) 3459 *module_type = hdev->hw.mac.module_type; 3460 } 3461 3462 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3463 { 3464 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3465 3466 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3467 } 3468 3469 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3470 { 3471 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3472 3473 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3474 } 3475 3476 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3477 { 3478 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3479 3480 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3481 } 3482 3483 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3484 { 3485 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3486 3487 return hdev->rst_stats.hw_rst_done_cnt; 3488 } 3489 3490 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3491 unsigned long *supported, 3492 unsigned long *advertising) 3493 { 3494 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3495 3496 *supported = hdev->hw.mac.supported; 3497 *advertising = hdev->hw.mac.advertising; 3498 } 3499 3500 #define MAX_SEPARATE_NUM 4 3501 #define SEPARATOR_VALUE 0xFDFCFBFA 3502 #define REG_NUM_PER_LINE 4 3503 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 3504 3505 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 3506 { 3507 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 3508 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3509 3510 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 3511 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 3512 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 3513 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 3514 3515 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 3516 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 3517 } 3518 3519 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 3520 void *data) 3521 { 3522 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3523 int i, j, reg_um, separator_num; 3524 u32 *reg = data; 3525 3526 *version = hdev->fw_version; 3527 3528 /* fetching per-VF registers values from VF PCIe register space */ 3529 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 3530 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3531 for (i = 0; i < reg_um; i++) 3532 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 3533 for (i = 0; i < separator_num; i++) 3534 *reg++ = SEPARATOR_VALUE; 3535 3536 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 3537 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3538 for (i = 0; i < reg_um; i++) 3539 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 3540 for (i = 0; i < separator_num; i++) 3541 *reg++ = SEPARATOR_VALUE; 3542 3543 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 3544 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3545 for (j = 0; j < hdev->num_tqps; j++) { 3546 for (i = 0; i < reg_um; i++) 3547 *reg++ = hclgevf_read_dev(&hdev->hw, 3548 ring_reg_addr_list[i] + 3549 0x200 * j); 3550 for (i = 0; i < separator_num; i++) 3551 *reg++ = SEPARATOR_VALUE; 3552 } 3553 3554 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 3555 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3556 for (j = 0; j < hdev->num_msi_used - 1; j++) { 3557 for (i = 0; i < reg_um; i++) 3558 *reg++ = hclgevf_read_dev(&hdev->hw, 3559 tqp_intr_reg_addr_list[i] + 3560 4 * j); 3561 for (i = 0; i < separator_num; i++) 3562 *reg++ = SEPARATOR_VALUE; 3563 } 3564 } 3565 3566 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 3567 u8 *port_base_vlan_info, u8 data_size) 3568 { 3569 struct hnae3_handle *nic = &hdev->nic; 3570 struct hclge_vf_to_pf_msg send_msg; 3571 int ret; 3572 3573 rtnl_lock(); 3574 3575 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 3576 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { 3577 dev_warn(&hdev->pdev->dev, 3578 "is resetting when updating port based vlan info\n"); 3579 rtnl_unlock(); 3580 return; 3581 } 3582 3583 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3584 if (ret) { 3585 rtnl_unlock(); 3586 return; 3587 } 3588 3589 /* send msg to PF and wait update port based vlan info */ 3590 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3591 HCLGE_MBX_PORT_BASE_VLAN_CFG); 3592 memcpy(send_msg.data, port_base_vlan_info, data_size); 3593 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3594 if (!ret) { 3595 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3596 nic->port_base_vlan_state = state; 3597 else 3598 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3599 } 3600 3601 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 3602 rtnl_unlock(); 3603 } 3604 3605 static const struct hnae3_ae_ops hclgevf_ops = { 3606 .init_ae_dev = hclgevf_init_ae_dev, 3607 .uninit_ae_dev = hclgevf_uninit_ae_dev, 3608 .reset_prepare = hclgevf_reset_prepare_general, 3609 .reset_done = hclgevf_reset_done, 3610 .init_client_instance = hclgevf_init_client_instance, 3611 .uninit_client_instance = hclgevf_uninit_client_instance, 3612 .start = hclgevf_ae_start, 3613 .stop = hclgevf_ae_stop, 3614 .client_start = hclgevf_client_start, 3615 .client_stop = hclgevf_client_stop, 3616 .map_ring_to_vector = hclgevf_map_ring_to_vector, 3617 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3618 .get_vector = hclgevf_get_vector, 3619 .put_vector = hclgevf_put_vector, 3620 .reset_queue = hclgevf_reset_tqp, 3621 .get_mac_addr = hclgevf_get_mac_addr, 3622 .set_mac_addr = hclgevf_set_mac_addr, 3623 .add_uc_addr = hclgevf_add_uc_addr, 3624 .rm_uc_addr = hclgevf_rm_uc_addr, 3625 .add_mc_addr = hclgevf_add_mc_addr, 3626 .rm_mc_addr = hclgevf_rm_mc_addr, 3627 .get_stats = hclgevf_get_stats, 3628 .update_stats = hclgevf_update_stats, 3629 .get_strings = hclgevf_get_strings, 3630 .get_sset_count = hclgevf_get_sset_count, 3631 .get_rss_key_size = hclge_comm_get_rss_key_size, 3632 .get_rss = hclgevf_get_rss, 3633 .set_rss = hclgevf_set_rss, 3634 .get_rss_tuple = hclgevf_get_rss_tuple, 3635 .set_rss_tuple = hclgevf_set_rss_tuple, 3636 .get_tc_size = hclgevf_get_tc_size, 3637 .get_fw_version = hclgevf_get_fw_version, 3638 .set_vlan_filter = hclgevf_set_vlan_filter, 3639 .enable_vlan_filter = hclgevf_enable_vlan_filter, 3640 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3641 .reset_event = hclgevf_reset_event, 3642 .set_default_reset_request = hclgevf_set_def_reset_request, 3643 .set_channels = hclgevf_set_channels, 3644 .get_channels = hclgevf_get_channels, 3645 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3646 .get_regs_len = hclgevf_get_regs_len, 3647 .get_regs = hclgevf_get_regs, 3648 .get_status = hclgevf_get_status, 3649 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3650 .get_media_type = hclgevf_get_media_type, 3651 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 3652 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3653 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 3654 .set_gro_en = hclgevf_gro_en, 3655 .set_mtu = hclgevf_set_mtu, 3656 .get_global_queue_id = hclgevf_get_qid_global, 3657 .set_timer_task = hclgevf_set_timer_task, 3658 .get_link_mode = hclgevf_get_link_mode, 3659 .set_promisc_mode = hclgevf_set_promisc_mode, 3660 .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3661 .get_cmdq_stat = hclgevf_get_cmdq_stat, 3662 }; 3663 3664 static struct hnae3_ae_algo ae_algovf = { 3665 .ops = &hclgevf_ops, 3666 .pdev_id_table = ae_algovf_pci_tbl, 3667 }; 3668 3669 static int hclgevf_init(void) 3670 { 3671 pr_info("%s is initializing\n", HCLGEVF_NAME); 3672 3673 hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); 3674 if (!hclgevf_wq) { 3675 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3676 return -ENOMEM; 3677 } 3678 3679 hnae3_register_ae_algo(&ae_algovf); 3680 3681 return 0; 3682 } 3683 3684 static void hclgevf_exit(void) 3685 { 3686 hnae3_unregister_ae_algo(&ae_algovf); 3687 destroy_workqueue(hclgevf_wq); 3688 } 3689 module_init(hclgevf_init); 3690 module_exit(hclgevf_exit); 3691 3692 MODULE_LICENSE("GPL"); 3693 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3694 MODULE_DESCRIPTION("HCLGEVF Driver"); 3695 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3696