// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_VECTOR0_CMDQ_STATE_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

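/* Query the per-queue RX/TX packet counters from the device over the
 * command queue and accumulate them into the software tqp_stats copies.
 */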
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}

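/* Everything the VF needs to learn about its own configuration (TC map,
 * queue counts, MAC, media type, ...) is requested from the PF over the
 * VF-to-PF mailbox: build a message with hclgevf_build_send_msg(), send it
 * with hclgevf_send_mbx_msg() and decode the response buffer.
 */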
static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

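/* Allocate and initialize the per-queue-pair bookkeeping. Queue pairs at
 * index HCLGEVF_TQP_MAX_SIZE_DEV_V2 and above live in an extended register
 * region, hence the two io_base formulas below.
 */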
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after init the max rss_size and tqps, adjust the default tqp numbers
	 * and rss size with the actual vector numbers
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
				kinfo->rss_size);

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

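/* Propagate a link state change to the NIC client and, if one is
 * registered, the RoCE client. HCLGEVF_STATE_LINK_UPDATING serializes
 * concurrent updates, and a device marked HCLGEVF_STATE_DOWN always
 * reports link down.
 */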
void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

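/* The RSS hash key is programmed HCLGEVF_RSS_HASH_KEY_NUM bytes at a time:
 * each command descriptor carries one key chunk together with its chunk
 * offset in the hash_config field.
 */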
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclgevf_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	unsigned int i;
	int status;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

/* for revision 0x20, vf shared the same rss config with pf */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

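/* Translate the ethtool RXH_* flags in nfc->data into the hardware tuple
 * bits; SCTP flows additionally set HCLGEVF_V_TAG_BIT. Note that the
 * hclgevf_set_rss_tuple() command below rewrites the whole input-tuple
 * register, so every flow type's current setting is re-sent alongside the
 * one being changed.
 */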
static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

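/* Push the whole shadow input-tuple configuration from rss_cfg to the
 * hardware in a single command; used when (re)initializing RSS.
 */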
static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

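/* Walk the ring chain and ask the PF to map/unmap each ring to the given
 * vector. Requests are batched: at most HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM
 * ring entries are packed into one mailbox message before it is sent.
 */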
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
			hnae3_get_field(node->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;
	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
					     &handle->priv_flags) ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool en_bc_pmc;

	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

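/* Request a unicast MAC change through the PF. The message carries both
 * the new and the old address; on the first configuration, when the PF has
 * not assigned a MAC of its own, the old-address slot is zeroed instead.
 */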
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}

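/* MAC address bookkeeping: every address on the uc/mc list carries a
 * state, HCLGEVF_MAC_TO_ADD (queued for programming), HCLGEVF_MAC_ACTIVE
 * (assumed present in hardware) or HCLGEVF_MAC_TO_DEL (queued for
 * removal). The rx_mode path only updates these lists; the mailbox traffic
 * happens later in hclgevf_sync_mac_table().
 */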
static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address is never added, unnecessary to delete */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %pM, state = %d, ret = %d\n",
				mac_node->mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * for it while the config request was in flight to the PF.
		 * If the mac_node state is ACTIVE, change it to TO_DEL so
		 * it gets removed on the next sync. If it is TO_ADD, the
		 * TO_ADD request failed, so just free the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * a new TO_ADD request was received while the delete
			 * request was in flight to the PF, so just change
			 * the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if adding or deleting some mac addresses failed, move them back
	 * to the mac_list and retry on the next sync.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When device is resetting or reset failed, firmware is unable to
	 * handle mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing the hw vlan filter fails, record the vlan id,
	 * and try to remove it from hw later, to stay consistent with
	 * the stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
				      enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	struct hnae3_handle *handle = &hdev->roce;
	int ret;

	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);
	return ret;
}

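/* Poll the reset status register until hardware reports that the reset has
 * completed: HCLGEVF_VF_RST_ING for a VF reset, HCLGEVF_RST_ING for the
 * other reset types.
 */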
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* wait a bit more to let the reset of the stack complete. This is
	 * needed when the reset was asserted by the PF, but it also means
	 * we may end up waiting a bit longer even for a plain VF reset.
	 */
	msleep(5000);

	return 0;
}

static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME 100

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		struct hclge_vf_to_pf_msg send_msg;
		int ret;

		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to assert VF reset, ret = %d\n", ret);
			return ret;
		}
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
		 hdev->reset_type);

	return 0;
}

static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

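/* Called when any stage of the reset fails: re-arm the IMP handshake, then
 * either requeue the same reset type (for up to HCLGEVF_RESET_MAX_FAIL_CNT
 * attempts) or give up and mark the device HCLGEVF_STATE_RST_FAIL.
 */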
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fail */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.rst_cnt++;

	/* perform reset of the stack & ae device for a client */
	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;
	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret &&
	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}

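/* Pick the highest-priority reset pending in *addr and clear every bit the
 * chosen level supersedes; the priority order is VF > VF_FULL >
 * VF_PF_FUNC > VF_FUNC > FLR.
 */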
2003 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 2004 rst_level = HNAE3_VF_FUNC_RESET; 2005 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2006 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 2007 rst_level = HNAE3_FLR_RESET; 2008 clear_bit(HNAE3_FLR_RESET, addr); 2009 } 2010 2011 return rst_level; 2012 } 2013 2014 static void hclgevf_reset_event(struct pci_dev *pdev, 2015 struct hnae3_handle *handle) 2016 { 2017 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2018 struct hclgevf_dev *hdev = ae_dev->priv; 2019 2020 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 2021 2022 if (hdev->default_reset_request) 2023 hdev->reset_level = 2024 hclgevf_get_reset_level(hdev, 2025 &hdev->default_reset_request); 2026 else 2027 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2028 2029 /* reset of this VF requested */ 2030 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 2031 hclgevf_reset_task_schedule(hdev); 2032 2033 hdev->last_reset_time = jiffies; 2034 } 2035 2036 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 2037 enum hnae3_reset_type rst_type) 2038 { 2039 struct hclgevf_dev *hdev = ae_dev->priv; 2040 2041 set_bit(rst_type, &hdev->default_reset_request); 2042 } 2043 2044 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 2045 { 2046 writel(en ? 1 : 0, vector->addr); 2047 } 2048 2049 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 2050 { 2051 #define HCLGEVF_FLR_RETRY_WAIT_MS 500 2052 #define HCLGEVF_FLR_RETRY_CNT 5 2053 2054 struct hclgevf_dev *hdev = ae_dev->priv; 2055 int retry_cnt = 0; 2056 int ret; 2057 2058 retry: 2059 down(&hdev->reset_sem); 2060 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2061 hdev->reset_type = HNAE3_FLR_RESET; 2062 ret = hclgevf_reset_prepare(hdev); 2063 if (ret) { 2064 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", 2065 ret); 2066 if (hdev->reset_pending || 2067 retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) { 2068 dev_err(&hdev->pdev->dev, 2069 "reset_pending:0x%lx, retry_cnt:%d\n", 2070 hdev->reset_pending, retry_cnt); 2071 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2072 up(&hdev->reset_sem); 2073 msleep(HCLGEVF_FLR_RETRY_WAIT_MS); 2074 goto retry; 2075 } 2076 } 2077 2078 /* disable misc vector before FLR done */ 2079 hclgevf_enable_vector(&hdev->misc_vector, false); 2080 hdev->rst_stats.flr_rst_cnt++; 2081 } 2082 2083 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 2084 { 2085 struct hclgevf_dev *hdev = ae_dev->priv; 2086 int ret; 2087 2088 hclgevf_enable_vector(&hdev->misc_vector, true); 2089 2090 ret = hclgevf_reset_rebuild(hdev); 2091 if (ret) 2092 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 2093 ret); 2094 2095 hdev->reset_type = HNAE3_NONE_RESET; 2096 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2097 up(&hdev->reset_sem); 2098 } 2099 2100 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 2101 { 2102 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2103 2104 return hdev->fw_version; 2105 } 2106 2107 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 2108 { 2109 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 2110 2111 vector->vector_irq = pci_irq_vector(hdev->pdev, 2112 HCLGEVF_MISC_VECTOR_NUM); 2113 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 2114 /* vector status always valid for Vector 0 */ 2115 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 2116 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 2117 2118 hdev->num_msi_left -= 1; 2119 hdev->num_msi_used += 
1;
2120 }
2121
2122 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
2123 {
2124 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2125 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
2126 &hdev->state))
2127 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
2128 }
2129
2130 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
2131 {
2132 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2133 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
2134 &hdev->state))
2135 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
2136 }
2137
2138 static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
2139 unsigned long delay)
2140 {
2141 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2142 !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
2143 mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
2144 }
2145
2146 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
2147 {
2148 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
2149
2150 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
2151 return;
2152
2153 down(&hdev->reset_sem);
2154 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2155
2156 if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
2157 &hdev->reset_state)) {
2158 /* PF has intimated that it is about to reset the hardware.
2159 * We now have to poll & check if hardware has actually
2160 * completed the reset sequence. On hardware reset completion,
2161 * VF needs to reset the client and ae device.
2162 */
2163 hdev->reset_attempts = 0;
2164
2165 hdev->last_reset_time = jiffies;
2166 while ((hdev->reset_type =
2167 hclgevf_get_reset_level(hdev, &hdev->reset_pending))
2168 != HNAE3_NONE_RESET)
2169 hclgevf_reset(hdev);
2170 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
2171 &hdev->reset_state)) {
2172 /* we could be here when either of the below happens:
2173 * 1. reset was initiated due to watchdog timeout caused by
2174 * a. IMP was earlier reset and our TX got choked down,
2175 * which resulted in the watchdog reacting and inducing a
2176 * VF reset. This also means our cmdq would be unreliable.
2177 * b. a problem in TX due to another lower layer (e.g. the
2178 * link layer not functioning properly, etc.)
2179 * 2. VF reset might have been initiated due to some config
2180 * change.
2181 *
2182 * NOTE: There's no clearer way to detect the above cases than
2183 * to react to the PF's response to this reset request. PF will
2184 * ack the 1b and 2 cases, but we will not get any intimation
2185 * about 1a from PF as the cmdq would be in an unreliable state,
2186 * i.e. mailbox communication between PF and VF would be broken.
2187 *
2188 * if we never get into the pending state it means either:
2189 * 1. PF is not receiving our request, which could be due to an
2190 * IMP reset
2191 * 2. PF is screwed
2192 * We cannot do much for case 2, but as a first check we can try
2193 * resetting our PCIe + stack and see if it alleviates the problem.
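* (Summary of the recovery logic coded below, added for clarity: each
* requested reset that never reaches the pending state increments
* reset_attempts, and once that count exceeds
* HCLGEVF_MAX_RESET_ATTEMPTS_CNT the driver escalates to
* HNAE3_VF_FULL_RESET, i.e. a full reset of the stack + PCIe.)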
2194 */ 2195 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 2196 /* prepare for full reset of stack + pcie interface */ 2197 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 2198 2199 /* "defer" schedule the reset task again */ 2200 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2201 } else { 2202 hdev->reset_attempts++; 2203 2204 set_bit(hdev->reset_level, &hdev->reset_pending); 2205 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2206 } 2207 hclgevf_reset_task_schedule(hdev); 2208 } 2209 2210 hdev->reset_type = HNAE3_NONE_RESET; 2211 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2212 up(&hdev->reset_sem); 2213 } 2214 2215 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 2216 { 2217 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2218 return; 2219 2220 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 2221 return; 2222 2223 hclgevf_mbx_async_handler(hdev); 2224 2225 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2226 } 2227 2228 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 2229 { 2230 struct hclge_vf_to_pf_msg send_msg; 2231 int ret; 2232 2233 if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) 2234 return; 2235 2236 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 2237 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2238 if (ret) 2239 dev_err(&hdev->pdev->dev, 2240 "VF sends keep alive cmd failed(=%d)\n", ret); 2241 } 2242 2243 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 2244 { 2245 unsigned long delta = round_jiffies_relative(HZ); 2246 struct hnae3_handle *handle = &hdev->nic; 2247 2248 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2249 return; 2250 2251 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 2252 delta = jiffies - hdev->last_serv_processed; 2253 2254 if (delta < round_jiffies_relative(HZ)) { 2255 delta = round_jiffies_relative(HZ) - delta; 2256 goto out; 2257 } 2258 } 2259 2260 hdev->serv_processed_cnt++; 2261 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 2262 hclgevf_keep_alive(hdev); 2263 2264 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 2265 hdev->last_serv_processed = jiffies; 2266 goto out; 2267 } 2268 2269 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 2270 hclgevf_tqps_update_stats(handle); 2271 2272 /* request the link status from the PF. 
PF would be able to tell VF
2273 * about such updates in the future so we might remove this later
2274 */
2275 hclgevf_request_link_info(hdev);
2276
2277 hclgevf_update_link_mode(hdev);
2278
2279 hclgevf_sync_vlan_filter(hdev);
2280
2281 hclgevf_sync_mac_table(hdev);
2282
2283 hclgevf_sync_promisc_mode(hdev);
2284
2285 hdev->last_serv_processed = jiffies;
2286
2287 out:
2288 hclgevf_task_schedule(hdev, delta);
2289 }
2290
2291 static void hclgevf_service_task(struct work_struct *work)
2292 {
2293 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
2294 service_task.work);
2295
2296 hclgevf_reset_service_task(hdev);
2297 hclgevf_mailbox_service_task(hdev);
2298 hclgevf_periodic_service_task(hdev);
2299
2300 /* Handle reset and mbx again in case the periodic task delays the
2301 * handling by calling hclgevf_task_schedule() in
2302 * hclgevf_periodic_service_task()
2303 */
2304 hclgevf_reset_service_task(hdev);
2305 hclgevf_mailbox_service_task(hdev);
2306 }
2307
2308 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
2309 {
2310 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
2311 }
2312
2313 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
2314 u32 *clearval)
2315 {
2316 u32 val, cmdq_stat_reg, rst_ing_reg;
2317
2318 /* fetch the events from their corresponding regs */
2319 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
2320 HCLGEVF_VECTOR0_CMDQ_STATE_REG);
2321
2322 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
2323 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
2324 dev_info(&hdev->pdev->dev,
2325 "receive reset interrupt 0x%x!\n", rst_ing_reg);
2326 set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
2327 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2328 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
2329 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
2330 hdev->rst_stats.vf_rst_cnt++;
2331 /* set up the VF hardware reset status; the PF will clear
2332 * this status once the PF's initialization is done.
2333 */
2334 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
2335 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
2336 val | HCLGEVF_VF_RST_ING_BIT);
2337 return HCLGEVF_VECTOR0_EVENT_RST;
2338 }
2339
2340 /* check for vector0 mailbox (=CMDQ RX) event source */
2341 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
2342 /* for revision 0x21, writing 0 to a bit of the clear
2343 * register clears the corresponding interrupt, while
2344 * writing 1 keeps the old value.
2345 * for revision 0x20, the clear register is a read & write
2346 * register, so we should just write 0 to the bit we are
2347 * handling, and keep the other bits as in cmdq_stat_reg.
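* In both cases the value computed below, which is written back
* later by hclgevf_clear_event_cause(), leaves every bit other
* than HCLGEVF_VECTOR0_RX_CMDQ_INT_B unchanged.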
2348 */ 2349 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2350 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2351 else 2352 *clearval = cmdq_stat_reg & 2353 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2354 2355 return HCLGEVF_VECTOR0_EVENT_MBX; 2356 } 2357 2358 /* print other vector0 event source */ 2359 dev_info(&hdev->pdev->dev, 2360 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2361 cmdq_stat_reg); 2362 2363 return HCLGEVF_VECTOR0_EVENT_OTHER; 2364 } 2365 2366 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2367 { 2368 enum hclgevf_evt_cause event_cause; 2369 struct hclgevf_dev *hdev = data; 2370 u32 clearval; 2371 2372 hclgevf_enable_vector(&hdev->misc_vector, false); 2373 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2374 2375 switch (event_cause) { 2376 case HCLGEVF_VECTOR0_EVENT_RST: 2377 hclgevf_reset_task_schedule(hdev); 2378 break; 2379 case HCLGEVF_VECTOR0_EVENT_MBX: 2380 hclgevf_mbx_handler(hdev); 2381 break; 2382 default: 2383 break; 2384 } 2385 2386 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 2387 hclgevf_clear_event_cause(hdev, clearval); 2388 hclgevf_enable_vector(&hdev->misc_vector, true); 2389 } 2390 2391 return IRQ_HANDLED; 2392 } 2393 2394 static int hclgevf_configure(struct hclgevf_dev *hdev) 2395 { 2396 int ret; 2397 2398 /* get current port based vlan state from PF */ 2399 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2400 if (ret) 2401 return ret; 2402 2403 /* get queue configuration from PF */ 2404 ret = hclgevf_get_queue_info(hdev); 2405 if (ret) 2406 return ret; 2407 2408 /* get queue depth info from PF */ 2409 ret = hclgevf_get_queue_depth(hdev); 2410 if (ret) 2411 return ret; 2412 2413 ret = hclgevf_get_pf_media_type(hdev); 2414 if (ret) 2415 return ret; 2416 2417 /* get tc configuration from PF */ 2418 return hclgevf_get_tc_info(hdev); 2419 } 2420 2421 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2422 { 2423 struct pci_dev *pdev = ae_dev->pdev; 2424 struct hclgevf_dev *hdev; 2425 2426 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2427 if (!hdev) 2428 return -ENOMEM; 2429 2430 hdev->pdev = pdev; 2431 hdev->ae_dev = ae_dev; 2432 ae_dev->priv = hdev; 2433 2434 return 0; 2435 } 2436 2437 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2438 { 2439 struct hnae3_handle *roce = &hdev->roce; 2440 struct hnae3_handle *nic = &hdev->nic; 2441 2442 roce->rinfo.num_vectors = hdev->num_roce_msix; 2443 2444 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2445 hdev->num_msi_left == 0) 2446 return -EINVAL; 2447 2448 roce->rinfo.base_vector = hdev->roce_base_vector; 2449 2450 roce->rinfo.netdev = nic->kinfo.netdev; 2451 roce->rinfo.roce_io_base = hdev->hw.io_base; 2452 roce->rinfo.roce_mem_base = hdev->hw.mem_base; 2453 2454 roce->pdev = nic->pdev; 2455 roce->ae_algo = nic->ae_algo; 2456 roce->numa_node_mask = nic->numa_node_mask; 2457 2458 return 0; 2459 } 2460 2461 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 2462 { 2463 struct hclgevf_cfg_gro_status_cmd *req; 2464 struct hclgevf_desc desc; 2465 int ret; 2466 2467 if (!hnae3_dev_gro_supported(hdev)) 2468 return 0; 2469 2470 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2471 false); 2472 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2473 2474 req->gro_en = en ? 
1 : 0; 2475 2476 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2477 if (ret) 2478 dev_err(&hdev->pdev->dev, 2479 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2480 2481 return ret; 2482 } 2483 2484 static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) 2485 { 2486 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2487 struct hclgevf_rss_tuple_cfg *tuple_sets; 2488 u32 i; 2489 2490 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 2491 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2492 tuple_sets = &rss_cfg->rss_tuple_sets; 2493 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2494 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2495 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2496 HCLGEVF_RSS_KEY_SIZE); 2497 2498 tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2499 tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2500 tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2501 tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2502 tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2503 tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2504 tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2505 tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2506 } 2507 2508 /* Initialize RSS indirect table */ 2509 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 2510 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2511 } 2512 2513 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2514 { 2515 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2516 int ret; 2517 2518 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2519 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 2520 rss_cfg->rss_hash_key); 2521 if (ret) 2522 return ret; 2523 2524 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2525 if (ret) 2526 return ret; 2527 } 2528 2529 ret = hclgevf_set_rss_indir_table(hdev); 2530 if (ret) 2531 return ret; 2532 2533 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2534 } 2535 2536 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2537 { 2538 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2539 false); 2540 } 2541 2542 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2543 { 2544 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2545 2546 unsigned long last = hdev->serv_processed_cnt; 2547 int i = 0; 2548 2549 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2550 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2551 last == hdev->serv_processed_cnt) 2552 usleep_range(1, 1); 2553 } 2554 2555 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2556 { 2557 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2558 2559 if (enable) { 2560 hclgevf_task_schedule(hdev, 0); 2561 } else { 2562 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2563 2564 /* flush memory to make sure DOWN is seen by service task */ 2565 smp_mb__before_atomic(); 2566 hclgevf_flush_link_update(hdev); 2567 } 2568 } 2569 2570 static int hclgevf_ae_start(struct hnae3_handle *handle) 2571 { 2572 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2573 2574 hclgevf_reset_tqp_stats(handle); 2575 2576 hclgevf_request_link_info(hdev); 2577 2578 hclgevf_update_link_mode(hdev); 2579 2580 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2581 2582 return 0; 2583 } 2584 2585 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2586 { 2587 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2588 int i; 2589 2590 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 
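/* note: the per-queue TQP reset below is skipped while a VF reset is
 * in progress; presumably the pending VF reset already covers the
 * queue reset (this comment summarizes the check that follows)
 */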
2591 2592 if (hdev->reset_type != HNAE3_VF_RESET) 2593 for (i = 0; i < handle->kinfo.num_tqps; i++) 2594 if (hclgevf_reset_tqp(handle, i)) 2595 break; 2596 2597 hclgevf_reset_tqp_stats(handle); 2598 hclgevf_update_link_status(hdev, 0); 2599 } 2600 2601 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2602 { 2603 #define HCLGEVF_STATE_ALIVE 1 2604 #define HCLGEVF_STATE_NOT_ALIVE 0 2605 2606 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2607 struct hclge_vf_to_pf_msg send_msg; 2608 2609 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2610 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2611 HCLGEVF_STATE_NOT_ALIVE; 2612 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2613 } 2614 2615 static int hclgevf_client_start(struct hnae3_handle *handle) 2616 { 2617 return hclgevf_set_alive(handle, true); 2618 } 2619 2620 static void hclgevf_client_stop(struct hnae3_handle *handle) 2621 { 2622 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2623 int ret; 2624 2625 ret = hclgevf_set_alive(handle, false); 2626 if (ret) 2627 dev_warn(&hdev->pdev->dev, 2628 "%s failed %d\n", __func__, ret); 2629 } 2630 2631 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2632 { 2633 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2634 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2635 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2636 2637 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2638 2639 mutex_init(&hdev->mbx_resp.mbx_mutex); 2640 sema_init(&hdev->reset_sem, 1); 2641 2642 spin_lock_init(&hdev->mac_table.mac_list_lock); 2643 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2644 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2645 2646 /* bring the device down */ 2647 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2648 } 2649 2650 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2651 { 2652 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2653 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2654 2655 if (hdev->service_task.work.func) 2656 cancel_delayed_work_sync(&hdev->service_task); 2657 2658 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2659 } 2660 2661 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2662 { 2663 struct pci_dev *pdev = hdev->pdev; 2664 int vectors; 2665 int i; 2666 2667 if (hnae3_dev_roce_supported(hdev)) 2668 vectors = pci_alloc_irq_vectors(pdev, 2669 hdev->roce_base_msix_offset + 1, 2670 hdev->num_msi, 2671 PCI_IRQ_MSIX); 2672 else 2673 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2674 hdev->num_msi, 2675 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2676 2677 if (vectors < 0) { 2678 dev_err(&pdev->dev, 2679 "failed(%d) to allocate MSI/MSI-X vectors\n", 2680 vectors); 2681 return vectors; 2682 } 2683 if (vectors < hdev->num_msi) 2684 dev_warn(&hdev->pdev->dev, 2685 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2686 hdev->num_msi, vectors); 2687 2688 hdev->num_msi = vectors; 2689 hdev->num_msi_left = vectors; 2690 2691 hdev->base_msi_vector = pdev->irq; 2692 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2693 2694 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2695 sizeof(u16), GFP_KERNEL); 2696 if (!hdev->vector_status) { 2697 pci_free_irq_vectors(pdev); 2698 return -ENOMEM; 2699 } 2700 2701 for (i = 0; i < hdev->num_msi; i++) 2702 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2703 2704 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2705 sizeof(int), GFP_KERNEL); 2706 if (!hdev->vector_irq) { 2707 devm_kfree(&pdev->dev, 
hdev->vector_status); 2708 pci_free_irq_vectors(pdev); 2709 return -ENOMEM; 2710 } 2711 2712 return 0; 2713 } 2714 2715 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2716 { 2717 struct pci_dev *pdev = hdev->pdev; 2718 2719 devm_kfree(&pdev->dev, hdev->vector_status); 2720 devm_kfree(&pdev->dev, hdev->vector_irq); 2721 pci_free_irq_vectors(pdev); 2722 } 2723 2724 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2725 { 2726 int ret; 2727 2728 hclgevf_get_misc_vector(hdev); 2729 2730 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2731 HCLGEVF_NAME, pci_name(hdev->pdev)); 2732 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2733 0, hdev->misc_vector.name, hdev); 2734 if (ret) { 2735 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2736 hdev->misc_vector.vector_irq); 2737 return ret; 2738 } 2739 2740 hclgevf_clear_event_cause(hdev, 0); 2741 2742 /* enable misc. vector(vector 0) */ 2743 hclgevf_enable_vector(&hdev->misc_vector, true); 2744 2745 return ret; 2746 } 2747 2748 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2749 { 2750 /* disable misc vector(vector 0) */ 2751 hclgevf_enable_vector(&hdev->misc_vector, false); 2752 synchronize_irq(hdev->misc_vector.vector_irq); 2753 free_irq(hdev->misc_vector.vector_irq, hdev); 2754 hclgevf_free_vector(hdev, 0); 2755 } 2756 2757 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2758 { 2759 struct device *dev = &hdev->pdev->dev; 2760 2761 dev_info(dev, "VF info begin:\n"); 2762 2763 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2764 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2765 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2766 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2767 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2768 dev_info(dev, "PF media type of this VF: %u\n", 2769 hdev->hw.mac.media_type); 2770 2771 dev_info(dev, "VF info end.\n"); 2772 } 2773 2774 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2775 struct hnae3_client *client) 2776 { 2777 struct hclgevf_dev *hdev = ae_dev->priv; 2778 int rst_cnt = hdev->rst_stats.rst_cnt; 2779 int ret; 2780 2781 ret = client->ops->init_instance(&hdev->nic); 2782 if (ret) 2783 return ret; 2784 2785 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2786 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2787 rst_cnt != hdev->rst_stats.rst_cnt) { 2788 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2789 2790 client->ops->uninit_instance(&hdev->nic, 0); 2791 return -EBUSY; 2792 } 2793 2794 hnae3_set_client_init_flag(client, ae_dev, 1); 2795 2796 if (netif_msg_drv(&hdev->nic)) 2797 hclgevf_info_show(hdev); 2798 2799 return 0; 2800 } 2801 2802 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2803 struct hnae3_client *client) 2804 { 2805 struct hclgevf_dev *hdev = ae_dev->priv; 2806 int ret; 2807 2808 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2809 !hdev->nic_client) 2810 return 0; 2811 2812 ret = hclgevf_init_roce_base_info(hdev); 2813 if (ret) 2814 return ret; 2815 2816 ret = client->ops->init_instance(&hdev->roce); 2817 if (ret) 2818 return ret; 2819 2820 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2821 hnae3_set_client_init_flag(client, ae_dev, 1); 2822 2823 return 0; 2824 } 2825 2826 static int hclgevf_init_client_instance(struct hnae3_client *client, 2827 struct hnae3_ae_dev *ae_dev) 2828 { 2829 struct hclgevf_dev *hdev = 
ae_dev->priv;
2830 int ret;
2831
2832 switch (client->type) {
2833 case HNAE3_CLIENT_KNIC:
2834 hdev->nic_client = client;
2835 hdev->nic.client = client;
2836
2837 ret = hclgevf_init_nic_client_instance(ae_dev, client);
2838 if (ret)
2839 goto clear_nic;
2840
2841 ret = hclgevf_init_roce_client_instance(ae_dev,
2842 hdev->roce_client);
2843 if (ret)
2844 goto clear_roce;
2845
2846 break;
2847 case HNAE3_CLIENT_ROCE:
2848 if (hnae3_dev_roce_supported(hdev)) {
2849 hdev->roce_client = client;
2850 hdev->roce.client = client;
2851 }
2852
2853 ret = hclgevf_init_roce_client_instance(ae_dev, client);
2854 if (ret)
2855 goto clear_roce;
2856
2857 break;
2858 default:
2859 return -EINVAL;
2860 }
2861
2862 return 0;
2863
2864 clear_nic:
2865 hdev->nic_client = NULL;
2866 hdev->nic.client = NULL;
2867 return ret;
2868 clear_roce:
2869 hdev->roce_client = NULL;
2870 hdev->roce.client = NULL;
2871 return ret;
2872 }
2873
2874 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
2875 struct hnae3_ae_dev *ae_dev)
2876 {
2877 struct hclgevf_dev *hdev = ae_dev->priv;
2878
2879 /* un-init roce, if it exists */
2880 if (hdev->roce_client) {
2881 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2882 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
2883 hdev->roce_client = NULL;
2884 hdev->roce.client = NULL;
2885 }
2886
2887 /* un-init nic/unic, if this was not called by roce client */
2888 if (client->ops->uninit_instance && hdev->nic_client &&
2889 client->type != HNAE3_CLIENT_ROCE) {
2890 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2891
2892 client->ops->uninit_instance(&hdev->nic, 0);
2893 hdev->nic_client = NULL;
2894 hdev->nic.client = NULL;
2895 }
2896 }
2897
2898 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
2899 {
2900 #define HCLGEVF_MEM_BAR 4
2901
2902 struct pci_dev *pdev = hdev->pdev;
2903 struct hclgevf_hw *hw = &hdev->hw;
2904
2905 /* if the device does not have device memory, return directly */
2906 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
2907 return 0;
2908
2909 hw->mem_base = devm_ioremap_wc(&pdev->dev,
2910 pci_resource_start(pdev,
2911 HCLGEVF_MEM_BAR),
2912 pci_resource_len(pdev, HCLGEVF_MEM_BAR));
2913 if (!hw->mem_base) {
2914 dev_err(&pdev->dev, "failed to map device memory\n");
2915 return -EFAULT;
2916 }
2917
2918 return 0;
2919 }
2920
2921 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
2922 {
2923 struct pci_dev *pdev = hdev->pdev;
2924 struct hclgevf_hw *hw;
2925 int ret;
2926
2927 ret = pci_enable_device(pdev);
2928 if (ret) {
2929 dev_err(&pdev->dev, "failed to enable PCI device\n");
2930 return ret;
2931 }
2932
2933 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2934 if (ret) {
2935 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
2936 goto err_disable_device;
2937 }
2938
2939 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
2940 if (ret) {
2941 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
2942 goto err_disable_device;
2943 }
2944
2945 pci_set_master(pdev);
2946 hw = &hdev->hw;
2947 hw->hdev = hdev;
2948 hw->io_base = pci_iomap(pdev, 2, 0);
2949 if (!hw->io_base) {
2950 dev_err(&pdev->dev, "can't map configuration register space\n");
2951 ret = -ENOMEM;
2952 goto err_clr_master;
2953 }
2954
2955 ret = hclgevf_dev_mem_map(hdev);
2956 if (ret)
2957 goto err_unmap_io_base;
2958
2959 return 0;
2960
2961 err_unmap_io_base:
2962 pci_iounmap(pdev, hdev->hw.io_base);
2963 err_clr_master:
2964 pci_clear_master(pdev);
2965 pci_release_regions(pdev);
2966 err_disable_device:
2967 pci_disable_device(pdev);
2968
2969 return ret;
2970 }
2971
2972 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
2973 {
2974 struct pci_dev *pdev = hdev->pdev;
2975
2976 if (hdev->hw.mem_base)
2977 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
2978
2979 pci_iounmap(pdev, hdev->hw.io_base);
2980 pci_clear_master(pdev);
2981 pci_release_regions(pdev);
2982 pci_disable_device(pdev);
2983 }
2984
2985 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
2986 {
2987 struct hclgevf_query_res_cmd *req;
2988 struct hclgevf_desc desc;
2989 int ret;
2990
2991 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
2992 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2993 if (ret) {
2994 dev_err(&hdev->pdev->dev,
2995 "query vf resource failed, ret = %d.\n", ret);
2996 return ret;
2997 }
2998
2999 req = (struct hclgevf_query_res_cmd *)desc.data;
3000
3001 if (hnae3_dev_roce_supported(hdev)) {
3002 hdev->roce_base_msix_offset =
3003 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
3004 HCLGEVF_MSIX_OFT_ROCEE_M,
3005 HCLGEVF_MSIX_OFT_ROCEE_S);
3006 hdev->num_roce_msix =
3007 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
3008 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
3009
3010 /* the number of NIC MSI-X vectors always equals the RoCE's. */
3011 hdev->num_nic_msix = hdev->num_roce_msix;
3012
3013 /* The VF has both NIC vectors and RoCE vectors; NIC vectors
3014 * are queued before RoCE vectors. The offset is fixed to 64.
3015 */
3016 hdev->num_msi = hdev->num_roce_msix +
3017 hdev->roce_base_msix_offset;
3018 } else {
3019 hdev->num_msi =
3020 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
3021 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
3022
3023 hdev->num_nic_msix = hdev->num_msi;
3024 }
3025
3026 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
3027 dev_err(&hdev->pdev->dev,
3028 "Just %u msi resources, not enough for vf(min:2).\n",
3029 hdev->num_nic_msix);
3030 return -EINVAL;
3031 }
3032
3033 return 0;
3034 }
3035
3036 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
3037 {
3038 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U
3039
3040 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3041
3042 ae_dev->dev_specs.max_non_tso_bd_num =
3043 HCLGEVF_MAX_NON_TSO_BD_NUM;
3044 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
3045 ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
3046 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
3047 }
3048
3049 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
3050 struct hclgevf_desc *desc)
3051 {
3052 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3053 struct hclgevf_dev_specs_0_cmd *req0;
3054 struct hclgevf_dev_specs_1_cmd *req1;
3055
3056 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
3057 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
3058
3059 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
3060 ae_dev->dev_specs.rss_ind_tbl_size =
3061 le16_to_cpu(req0->rss_ind_tbl_size);
3062 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
3063 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
3064 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
3065 }
3066
3067 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
3068 {
3069 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
3070
3071 if (!dev_specs->max_non_tso_bd_num)
3072 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
3073 if (!dev_specs->rss_ind_tbl_size)
3074
dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3075 if (!dev_specs->rss_key_size) 3076 dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3077 if (!dev_specs->max_int_gl) 3078 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3079 } 3080 3081 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 3082 { 3083 struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 3084 int ret; 3085 int i; 3086 3087 /* set default specifications as devices lower than version V3 do not 3088 * support querying specifications from firmware. 3089 */ 3090 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 3091 hclgevf_set_default_dev_specs(hdev); 3092 return 0; 3093 } 3094 3095 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3096 hclgevf_cmd_setup_basic_desc(&desc[i], 3097 HCLGEVF_OPC_QUERY_DEV_SPECS, true); 3098 desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT); 3099 } 3100 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, 3101 true); 3102 3103 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 3104 if (ret) 3105 return ret; 3106 3107 hclgevf_parse_dev_specs(hdev, desc); 3108 hclgevf_check_dev_specs(hdev); 3109 3110 return 0; 3111 } 3112 3113 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 3114 { 3115 struct pci_dev *pdev = hdev->pdev; 3116 int ret = 0; 3117 3118 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 3119 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3120 hclgevf_misc_irq_uninit(hdev); 3121 hclgevf_uninit_msi(hdev); 3122 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3123 } 3124 3125 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3126 pci_set_master(pdev); 3127 ret = hclgevf_init_msi(hdev); 3128 if (ret) { 3129 dev_err(&pdev->dev, 3130 "failed(%d) to init MSI/MSI-X\n", ret); 3131 return ret; 3132 } 3133 3134 ret = hclgevf_misc_irq_init(hdev); 3135 if (ret) { 3136 hclgevf_uninit_msi(hdev); 3137 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 3138 ret); 3139 return ret; 3140 } 3141 3142 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3143 } 3144 3145 return ret; 3146 } 3147 3148 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 3149 { 3150 struct hclge_vf_to_pf_msg send_msg; 3151 3152 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 3153 HCLGE_MBX_VPORT_LIST_CLEAR); 3154 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3155 } 3156 3157 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 3158 { 3159 struct pci_dev *pdev = hdev->pdev; 3160 int ret; 3161 3162 ret = hclgevf_pci_reset(hdev); 3163 if (ret) { 3164 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 3165 return ret; 3166 } 3167 3168 ret = hclgevf_cmd_init(hdev); 3169 if (ret) { 3170 dev_err(&pdev->dev, "cmd failed %d\n", ret); 3171 return ret; 3172 } 3173 3174 ret = hclgevf_rss_init_hw(hdev); 3175 if (ret) { 3176 dev_err(&hdev->pdev->dev, 3177 "failed(%d) to initialize RSS\n", ret); 3178 return ret; 3179 } 3180 3181 ret = hclgevf_config_gro(hdev, true); 3182 if (ret) 3183 return ret; 3184 3185 ret = hclgevf_init_vlan_config(hdev); 3186 if (ret) { 3187 dev_err(&hdev->pdev->dev, 3188 "failed(%d) to initialize VLAN config\n", ret); 3189 return ret; 3190 } 3191 3192 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3193 3194 dev_info(&hdev->pdev->dev, "Reset done\n"); 3195 3196 return 0; 3197 } 3198 3199 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 3200 { 3201 struct pci_dev *pdev = hdev->pdev; 3202 int ret; 3203 3204 ret = hclgevf_pci_init(hdev); 3205 if (ret) 3206 return ret; 3207 
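/* note on ordering (a summary of the sequence below): the command
 * queue must be up before VF resources and device specs can be
 * queried from the firmware, and MSI/MSI-X allocation must precede
 * the misc IRQ (vector 0) setup
 */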
3208 ret = hclgevf_cmd_queue_init(hdev);
3209 if (ret)
3210 goto err_cmd_queue_init;
3211
3212 ret = hclgevf_cmd_init(hdev);
3213 if (ret)
3214 goto err_cmd_init;
3215
3216 /* Get vf resource */
3217 ret = hclgevf_query_vf_resource(hdev);
3218 if (ret)
3219 goto err_cmd_init;
3220
3221 ret = hclgevf_query_dev_specs(hdev);
3222 if (ret) {
3223 dev_err(&pdev->dev,
3224 "failed to query dev specifications, ret = %d\n", ret);
3225 goto err_cmd_init;
3226 }
3227
3228 ret = hclgevf_init_msi(hdev);
3229 if (ret) {
3230 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
3231 goto err_cmd_init;
3232 }
3233
3234 hclgevf_state_init(hdev);
3235 hdev->reset_level = HNAE3_VF_FUNC_RESET;
3236 hdev->reset_type = HNAE3_NONE_RESET;
3237
3238 ret = hclgevf_misc_irq_init(hdev);
3239 if (ret)
3240 goto err_misc_irq_init;
3241
3242 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3243
3244 ret = hclgevf_configure(hdev);
3245 if (ret) {
3246 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
3247 goto err_config;
3248 }
3249
3250 ret = hclgevf_alloc_tqps(hdev);
3251 if (ret) {
3252 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
3253 goto err_config;
3254 }
3255
3256 ret = hclgevf_set_handle_info(hdev);
3257 if (ret)
3258 goto err_config;
3259
3260 ret = hclgevf_config_gro(hdev, true);
3261 if (ret)
3262 goto err_config;
3263
3264 /* Initialize RSS for this VF */
3265 hclgevf_rss_init_cfg(hdev);
3266 ret = hclgevf_rss_init_hw(hdev);
3267 if (ret) {
3268 dev_err(&hdev->pdev->dev,
3269 "failed(%d) to initialize RSS\n", ret);
3270 goto err_config;
3271 }
3272
3273 /* ensure the VF tbl list is empty before init */
3274 ret = hclgevf_clear_vport_list(hdev);
3275 if (ret) {
3276 dev_err(&pdev->dev,
3277 "failed to clear tbl list configuration, ret = %d.\n",
3278 ret);
3279 goto err_config;
3280 }
3281
3282 ret = hclgevf_init_vlan_config(hdev);
3283 if (ret) {
3284 dev_err(&hdev->pdev->dev,
3285 "failed(%d) to initialize VLAN config\n", ret);
3286 goto err_config;
3287 }
3288
3289 hdev->last_reset_time = jiffies;
3290 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
3291 HCLGEVF_DRIVER_NAME);
3292
3293 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
3294
3295 return 0;
3296
3297 err_config:
3298 hclgevf_misc_irq_uninit(hdev);
3299 err_misc_irq_init:
3300 hclgevf_state_uninit(hdev);
3301 hclgevf_uninit_msi(hdev);
3302 err_cmd_init:
3303 hclgevf_cmd_uninit(hdev);
3304 err_cmd_queue_init:
3305 hclgevf_pci_uninit(hdev);
3306 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3307 return ret;
3308 }
3309
3310 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
3311 {
3312 struct hclge_vf_to_pf_msg send_msg;
3313
3314 hclgevf_state_uninit(hdev);
3315
3316 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
3317 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3318
3319 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3320 hclgevf_misc_irq_uninit(hdev);
3321 hclgevf_uninit_msi(hdev);
3322 }
3323
3324 hclgevf_cmd_uninit(hdev);
3325 hclgevf_pci_uninit(hdev);
3326 hclgevf_uninit_mac_list(hdev);
3327 }
3328
3329 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
3330 {
3331 struct pci_dev *pdev = ae_dev->pdev;
3332 int ret;
3333
3334 ret = hclgevf_alloc_hdev(ae_dev);
3335 if (ret) {
3336 dev_err(&pdev->dev, "hclge device allocation failed\n");
3337 return ret;
3338 }
3339
3340 ret = hclgevf_init_hdev(ae_dev->priv);
3341 if (ret) {
3342 dev_err(&pdev->dev, "hclge device initialization failed\n");
3343 return ret;
3344 }
3345
3346 return 0;
3347
} 3348 3349 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3350 { 3351 struct hclgevf_dev *hdev = ae_dev->priv; 3352 3353 hclgevf_uninit_hdev(hdev); 3354 ae_dev->priv = NULL; 3355 } 3356 3357 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3358 { 3359 struct hnae3_handle *nic = &hdev->nic; 3360 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3361 3362 return min_t(u32, hdev->rss_size_max, 3363 hdev->num_tqps / kinfo->num_tc); 3364 } 3365 3366 /** 3367 * hclgevf_get_channels - Get the current channels enabled and max supported. 3368 * @handle: hardware information for network interface 3369 * @ch: ethtool channels structure 3370 * 3371 * We don't support separate tx and rx queues as channels. The other count 3372 * represents how many queues are being used for control. max_combined counts 3373 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3374 * q_vectors since we support a lot more queue pairs than q_vectors. 3375 **/ 3376 static void hclgevf_get_channels(struct hnae3_handle *handle, 3377 struct ethtool_channels *ch) 3378 { 3379 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3380 3381 ch->max_combined = hclgevf_get_max_channels(hdev); 3382 ch->other_count = 0; 3383 ch->max_other = 0; 3384 ch->combined_count = handle->kinfo.rss_size; 3385 } 3386 3387 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3388 u16 *alloc_tqps, u16 *max_rss_size) 3389 { 3390 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3391 3392 *alloc_tqps = hdev->num_tqps; 3393 *max_rss_size = hdev->rss_size_max; 3394 } 3395 3396 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3397 u32 new_tqps_num) 3398 { 3399 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3400 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3401 u16 max_rss_size; 3402 3403 kinfo->req_rss_size = new_tqps_num; 3404 3405 max_rss_size = min_t(u16, hdev->rss_size_max, 3406 hdev->num_tqps / kinfo->num_tc); 3407 3408 /* Use the user's configuration when it is not larger than 3409 * max_rss_size, otherwise, use the maximum specification value. 
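* For example (illustrative values only): with max_rss_size 16, a
* requested size of 8 (different from the current one) is honoured,
* while a requested size of 32 is ignored and the current rss_size
* is kept.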
3410 */
3411 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
3412 kinfo->req_rss_size <= max_rss_size)
3413 kinfo->rss_size = kinfo->req_rss_size;
3414 else if (kinfo->rss_size > max_rss_size ||
3415 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
3416 kinfo->rss_size = max_rss_size;
3417
3418 kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
3419 }
3420
3421 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
3422 bool rxfh_configured)
3423 {
3424 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3425 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3426 u16 cur_rss_size = kinfo->rss_size;
3427 u16 cur_tqps = kinfo->num_tqps;
3428 u32 *rss_indir;
3429 unsigned int i;
3430 int ret;
3431
3432 hclgevf_update_rss_size(handle, new_tqps_num);
3433
3434 ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
3435 if (ret)
3436 return ret;
3437
3438 /* RSS indirection table has been configured by the user */
3439 if (rxfh_configured)
3440 goto out;
3441
3442 /* Reinitialize the RSS indirection table according to the new RSS size */
3443 rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
3444 if (!rss_indir)
3445 return -ENOMEM;
3446
3447 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
3448 rss_indir[i] = i % kinfo->rss_size;
3449
3450 hdev->rss_cfg.rss_size = kinfo->rss_size;
3451
3452 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
3453 if (ret)
3454 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
3455 ret);
3456
3457 kfree(rss_indir);
3458
3459 out:
3460 if (!ret)
3461 dev_info(&hdev->pdev->dev,
3462 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
3463 cur_rss_size, kinfo->rss_size,
3464 cur_tqps, kinfo->rss_size * kinfo->num_tc);
3465
3466 return ret;
3467 }
3468
3469 static int hclgevf_get_status(struct hnae3_handle *handle)
3470 {
3471 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3472
3473 return hdev->hw.mac.link;
3474 }
3475
3476 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
3477 u8 *auto_neg, u32 *speed,
3478 u8 *duplex)
3479 {
3480 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3481
3482 if (speed)
3483 *speed = hdev->hw.mac.speed;
3484 if (duplex)
3485 *duplex = hdev->hw.mac.duplex;
3486 if (auto_neg)
3487 *auto_neg = AUTONEG_DISABLE;
3488 }
3489
3490 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
3491 u8 duplex)
3492 {
3493 hdev->hw.mac.speed = speed;
3494 hdev->hw.mac.duplex = duplex;
3495 }
3496
3497 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
3498 {
3499 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3500
3501 return hclgevf_config_gro(hdev, enable);
3502 }
3503
3504 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
3505 u8 *module_type)
3506 {
3507 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3508
3509 if (media_type)
3510 *media_type = hdev->hw.mac.media_type;
3511
3512 if (module_type)
3513 *module_type = hdev->hw.mac.module_type;
3514 }
3515
3516 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
3517 {
3518 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3519
3520 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
3521 }
3522
3523 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
3524 {
3525 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3526
3527 return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
3528 }
3529
3530 static bool hclgevf_ae_dev_resetting(struct
hnae3_handle *handle)
3531 {
3532 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3533
3534 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
3535 }
3536
3537 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
3538 {
3539 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3540
3541 return hdev->rst_stats.hw_rst_done_cnt;
3542 }
3543
3544 static void hclgevf_get_link_mode(struct hnae3_handle *handle,
3545 unsigned long *supported,
3546 unsigned long *advertising)
3547 {
3548 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3549
3550 *supported = hdev->hw.mac.supported;
3551 *advertising = hdev->hw.mac.advertising;
3552 }
3553
3554 #define MAX_SEPARATE_NUM 4
3555 #define SEPARATOR_VALUE 0xFFFFFFFF
3556 #define REG_NUM_PER_LINE 4
3557 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
3558
3559 static int hclgevf_get_regs_len(struct hnae3_handle *handle)
3560 {
3561 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
3562 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3563
3564 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
3565 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
3566 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
3567 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
3568
3569 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
3570 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
3571 }
3572
3573 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
3574 void *data)
3575 {
3576 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3577 int i, j, reg_um, separator_num;
3578 u32 *reg = data;
3579
3580 *version = hdev->fw_version;
3581
3582 /* fetch per-VF register values from the VF PCIe register space */
3583 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
3584 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3585 for (i = 0; i < reg_um; i++)
3586 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
3587 for (i = 0; i < separator_num; i++)
3588 *reg++ = SEPARATOR_VALUE;
3589
3590 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
3591 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3592 for (i = 0; i < reg_um; i++)
3593 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
3594 for (i = 0; i < separator_num; i++)
3595 *reg++ = SEPARATOR_VALUE;
3596
3597 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
3598 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3599 for (j = 0; j < hdev->num_tqps; j++) {
3600 for (i = 0; i < reg_um; i++)
3601 *reg++ = hclgevf_read_dev(&hdev->hw,
3602 ring_reg_addr_list[i] +
3603 0x200 * j);
3604 for (i = 0; i < separator_num; i++)
3605 *reg++ = SEPARATOR_VALUE;
3606 }
3607
3608 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
3609 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3610 for (j = 0; j < hdev->num_msi_used - 1; j++) {
3611 for (i = 0; i < reg_um; i++)
3612 *reg++ = hclgevf_read_dev(&hdev->hw,
3613 tqp_intr_reg_addr_list[i] +
3614 4 * j);
3615 for (i = 0; i < separator_num; i++)
3616 *reg++ = SEPARATOR_VALUE;
3617 }
3618 }
3619
3620 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
3621 u8 *port_base_vlan_info, u8 data_size)
3622 {
3623 struct hnae3_handle *nic = &hdev->nic;
3624 struct hclge_vf_to_pf_msg send_msg;
3625 int ret;
3626
3627 rtnl_lock();
3628
3629 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
3630
test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
3631 dev_warn(&hdev->pdev->dev,
3632 "is resetting when updating port based vlan info\n");
3633 rtnl_unlock();
3634 return;
3635 }
3636
3637 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
3638 if (ret) {
3639 rtnl_unlock();
3640 return;
3641 }
3642
3643 /* send msg to PF and wait for PF to update the port-based VLAN info */
3644 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
3645 HCLGE_MBX_PORT_BASE_VLAN_CFG);
3646 memcpy(send_msg.data, port_base_vlan_info, data_size);
3647 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3648 if (!ret) {
3649 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
3650 nic->port_base_vlan_state = state;
3651 else
3652 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
3653 }
3654
3655 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
3656 rtnl_unlock();
3657 }
3658
3659 static const struct hnae3_ae_ops hclgevf_ops = {
3660 .init_ae_dev = hclgevf_init_ae_dev,
3661 .uninit_ae_dev = hclgevf_uninit_ae_dev,
3662 .flr_prepare = hclgevf_flr_prepare,
3663 .flr_done = hclgevf_flr_done,
3664 .init_client_instance = hclgevf_init_client_instance,
3665 .uninit_client_instance = hclgevf_uninit_client_instance,
3666 .start = hclgevf_ae_start,
3667 .stop = hclgevf_ae_stop,
3668 .client_start = hclgevf_client_start,
3669 .client_stop = hclgevf_client_stop,
3670 .map_ring_to_vector = hclgevf_map_ring_to_vector,
3671 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
3672 .get_vector = hclgevf_get_vector,
3673 .put_vector = hclgevf_put_vector,
3674 .reset_queue = hclgevf_reset_tqp,
3675 .get_mac_addr = hclgevf_get_mac_addr,
3676 .set_mac_addr = hclgevf_set_mac_addr,
3677 .add_uc_addr = hclgevf_add_uc_addr,
3678 .rm_uc_addr = hclgevf_rm_uc_addr,
3679 .add_mc_addr = hclgevf_add_mc_addr,
3680 .rm_mc_addr = hclgevf_rm_mc_addr,
3681 .get_stats = hclgevf_get_stats,
3682 .update_stats = hclgevf_update_stats,
3683 .get_strings = hclgevf_get_strings,
3684 .get_sset_count = hclgevf_get_sset_count,
3685 .get_rss_key_size = hclgevf_get_rss_key_size,
3686 .get_rss_indir_size = hclgevf_get_rss_indir_size,
3687 .get_rss = hclgevf_get_rss,
3688 .set_rss = hclgevf_set_rss,
3689 .get_rss_tuple = hclgevf_get_rss_tuple,
3690 .set_rss_tuple = hclgevf_set_rss_tuple,
3691 .get_tc_size = hclgevf_get_tc_size,
3692 .get_fw_version = hclgevf_get_fw_version,
3693 .set_vlan_filter = hclgevf_set_vlan_filter,
3694 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
3695 .reset_event = hclgevf_reset_event,
3696 .set_default_reset_request = hclgevf_set_def_reset_request,
3697 .set_channels = hclgevf_set_channels,
3698 .get_channels = hclgevf_get_channels,
3699 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
3700 .get_regs_len = hclgevf_get_regs_len,
3701 .get_regs = hclgevf_get_regs,
3702 .get_status = hclgevf_get_status,
3703 .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
3704 .get_media_type = hclgevf_get_media_type,
3705 .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
3706 .ae_dev_resetting = hclgevf_ae_dev_resetting,
3707 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
3708 .set_gro_en = hclgevf_gro_en,
3709 .set_mtu = hclgevf_set_mtu,
3710 .get_global_queue_id = hclgevf_get_qid_global,
3711 .set_timer_task = hclgevf_set_timer_task,
3712 .get_link_mode = hclgevf_get_link_mode,
3713 .set_promisc_mode = hclgevf_set_promisc_mode,
3714 .request_update_promisc_mode = hclgevf_request_update_promisc_mode,
3715 .get_cmdq_stat = hclgevf_get_cmdq_stat,
3716 };
3717
3718 static struct hnae3_ae_algo ae_algovf = {
3719 .ops =
&hclgevf_ops, 3720 .pdev_id_table = ae_algovf_pci_tbl, 3721 }; 3722 3723 static int hclgevf_init(void) 3724 { 3725 pr_info("%s is initializing\n", HCLGEVF_NAME); 3726 3727 hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); 3728 if (!hclgevf_wq) { 3729 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3730 return -ENOMEM; 3731 } 3732 3733 hnae3_register_ae_algo(&ae_algovf); 3734 3735 return 0; 3736 } 3737 3738 static void hclgevf_exit(void) 3739 { 3740 hnae3_unregister_ae_algo(&ae_algovf); 3741 destroy_workqueue(hclgevf_wq); 3742 } 3743 module_init(hclgevf_init); 3744 module_exit(hclgevf_exit); 3745 3746 MODULE_LICENSE("GPL"); 3747 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3748 MODULE_DESCRIPTION("HCLGEVF Driver"); 3749 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3750