1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 #define HCLGEVF_RESET_MAX_FAIL_CNT 5 15 16 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 17 static struct hnae3_ae_algo ae_algovf; 18 19 static struct workqueue_struct *hclgevf_wq; 20 21 static const struct pci_device_id ae_algovf_pci_tbl[] = { 22 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 23 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 24 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 25 /* required last entry */ 26 {0, } 27 }; 28 29 static const u8 hclgevf_hash_key[] = { 30 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 31 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 32 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 33 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 34 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 35 }; 36 37 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 38 39 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 40 HCLGEVF_CMDQ_TX_ADDR_H_REG, 41 HCLGEVF_CMDQ_TX_DEPTH_REG, 42 HCLGEVF_CMDQ_TX_TAIL_REG, 43 HCLGEVF_CMDQ_TX_HEAD_REG, 44 HCLGEVF_CMDQ_RX_ADDR_L_REG, 45 HCLGEVF_CMDQ_RX_ADDR_H_REG, 46 HCLGEVF_CMDQ_RX_DEPTH_REG, 47 HCLGEVF_CMDQ_RX_TAIL_REG, 48 HCLGEVF_CMDQ_RX_HEAD_REG, 49 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 50 HCLGEVF_VECTOR0_CMDQ_STATE_REG, 51 HCLGEVF_CMDQ_INTR_EN_REG, 52 HCLGEVF_CMDQ_INTR_GEN_REG}; 53 54 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 55 HCLGEVF_RST_ING, 56 HCLGEVF_GRO_EN_REG}; 57 58 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 59 HCLGEVF_RING_RX_ADDR_H_REG, 60 HCLGEVF_RING_RX_BD_NUM_REG, 61 HCLGEVF_RING_RX_BD_LENGTH_REG, 62 HCLGEVF_RING_RX_MERGE_EN_REG, 63 HCLGEVF_RING_RX_TAIL_REG, 64 HCLGEVF_RING_RX_HEAD_REG, 65 HCLGEVF_RING_RX_FBD_NUM_REG, 66 HCLGEVF_RING_RX_OFFSET_REG, 67 HCLGEVF_RING_RX_FBD_OFFSET_REG, 68 HCLGEVF_RING_RX_STASH_REG, 69 HCLGEVF_RING_RX_BD_ERR_REG, 70 HCLGEVF_RING_TX_ADDR_L_REG, 71 HCLGEVF_RING_TX_ADDR_H_REG, 72 HCLGEVF_RING_TX_BD_NUM_REG, 73 HCLGEVF_RING_TX_PRIORITY_REG, 74 HCLGEVF_RING_TX_TC_REG, 75 HCLGEVF_RING_TX_MERGE_EN_REG, 76 HCLGEVF_RING_TX_TAIL_REG, 77 HCLGEVF_RING_TX_HEAD_REG, 78 HCLGEVF_RING_TX_FBD_NUM_REG, 79 HCLGEVF_RING_TX_OFFSET_REG, 80 HCLGEVF_RING_TX_EBD_NUM_REG, 81 HCLGEVF_RING_TX_EBD_OFFSET_REG, 82 HCLGEVF_RING_TX_BD_ERR_REG, 83 HCLGEVF_RING_EN_REG}; 84 85 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 86 HCLGEVF_TQP_INTR_GL0_REG, 87 HCLGEVF_TQP_INTR_GL1_REG, 88 HCLGEVF_TQP_INTR_GL2_REG, 89 HCLGEVF_TQP_INTR_RL_REG}; 90 91 static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 92 { 93 if (!handle->client) 94 return container_of(handle, struct hclgevf_dev, nic); 95 else if (handle->client->type == HNAE3_CLIENT_ROCE) 96 return container_of(handle, struct hclgevf_dev, roce); 97 else 98 return container_of(handle, struct hclgevf_dev, nic); 99 } 100 101 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 102 { 103 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 104 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 105 struct hclgevf_desc desc; 106 struct hclgevf_tqp *tqp; 107 int status; 108 int i; 109 110 for (i = 0; i < kinfo->num_tqps; i++) { 111 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 112 hclgevf_cmd_setup_basic_desc(&desc, 113 HCLGEVF_OPC_QUERY_RX_STATUS, 114 true); 115 116 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 117 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 118 if (status) { 119 dev_err(&hdev->pdev->dev, 120 "Query tqp stat fail, status = %d,queue = %d\n", 121 status, i); 122 return status; 123 } 124 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 125 le32_to_cpu(desc.data[1]); 126 127 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 128 true); 129 130 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 131 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 132 if (status) { 133 dev_err(&hdev->pdev->dev, 134 "Query tqp stat fail, status = %d,queue = %d\n", 135 status, i); 136 return status; 137 } 138 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 139 le32_to_cpu(desc.data[1]); 140 } 141 142 return 0; 143 } 144 145 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 146 { 147 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 148 struct hclgevf_tqp *tqp; 149 u64 *buff = data; 150 int i; 151 152 for (i = 0; i < kinfo->num_tqps; i++) { 153 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 154 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 155 } 156 for (i = 0; i < kinfo->num_tqps; i++) { 157 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 158 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 159 } 160 161 return buff; 162 } 163 164 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 165 { 166 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 167 168 return kinfo->num_tqps * 2; 169 } 170 171 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 172 { 173 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 174 u8 *buff = data; 175 int i; 176 177 for (i = 0; i < kinfo->num_tqps; i++) { 178 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 179 struct hclgevf_tqp, q); 180 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 181 tqp->index); 182 buff += ETH_GSTRING_LEN; 183 } 184 185 for (i = 0; i < kinfo->num_tqps; i++) { 186 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 187 struct hclgevf_tqp, q); 188 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 189 tqp->index); 190 buff += ETH_GSTRING_LEN; 191 } 192 193 return buff; 194 } 195 196 static void hclgevf_update_stats(struct hnae3_handle *handle, 197 struct net_device_stats *net_stats) 198 { 199 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 200 int status; 201 202 status = hclgevf_tqps_update_stats(handle); 203 if (status) 204 dev_err(&hdev->pdev->dev, 205 "VF update of TQPS stats fail, status = %d.\n", 206 status); 207 } 208 209 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 210 { 211 if (strset == ETH_SS_TEST) 212 return -EOPNOTSUPP; 213 else if (strset == ETH_SS_STATS) 214 return hclgevf_tqps_get_sset_count(handle, strset); 215 216 return 0; 217 } 218 219 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 220 u8 *data) 221 { 222 u8 *p = (char *)data; 223 224 if (strset == ETH_SS_STATS) 225 p = hclgevf_tqps_get_strings(handle, p); 226 } 227 228 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 229 { 230 hclgevf_tqps_get_stats(handle, data); 231 } 232 233 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, 234 u8 subcode) 235 { 236 if (msg) { 237 memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); 238 msg->code = code; 239 msg->subcode = subcode; 240 } 241 } 242 243 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 244 { 245 struct hclge_vf_to_pf_msg send_msg; 246 u8 resp_msg; 247 int status; 248 249 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0); 250 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 251 sizeof(resp_msg)); 252 if (status) { 253 dev_err(&hdev->pdev->dev, 254 "VF request to get TC info from PF failed %d", 255 status); 256 return status; 257 } 258 259 hdev->hw_tc_map = resp_msg; 260 261 return 0; 262 } 263 264 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 265 { 266 struct hnae3_handle *nic = &hdev->nic; 267 struct hclge_vf_to_pf_msg send_msg; 268 u8 resp_msg; 269 int ret; 270 271 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 272 HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); 273 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 274 sizeof(u8)); 275 if (ret) { 276 dev_err(&hdev->pdev->dev, 277 "VF request to get port based vlan state failed %d", 278 ret); 279 return ret; 280 } 281 282 nic->port_base_vlan_state = resp_msg; 283 284 return 0; 285 } 286 287 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 288 { 289 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 290 #define HCLGEVF_TQPS_ALLOC_OFFSET 0 291 #define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 292 #define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 293 294 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 295 struct hclge_vf_to_pf_msg send_msg; 296 int status; 297 298 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); 299 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 300 HCLGEVF_TQPS_RSS_INFO_LEN); 301 if (status) { 302 dev_err(&hdev->pdev->dev, 303 "VF request to get tqp info from PF failed %d", 304 status); 305 return status; 306 } 307 308 memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], 309 sizeof(u16)); 310 memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], 311 sizeof(u16)); 312 memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], 313 sizeof(u16)); 314 315 return 0; 316 } 317 318 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 319 { 320 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 321 #define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 322 #define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 323 324 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 325 struct hclge_vf_to_pf_msg send_msg; 326 int ret; 327 328 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); 329 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 330 HCLGEVF_TQPS_DEPTH_INFO_LEN); 331 if (ret) { 332 dev_err(&hdev->pdev->dev, 333 "VF request to get tqp depth info from PF failed %d", 334 ret); 335 return ret; 336 } 337 338 memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], 339 sizeof(u16)); 340 memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], 341 sizeof(u16)); 342 343 return 0; 344 } 345 346 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 347 { 348 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 349 struct hclge_vf_to_pf_msg send_msg; 350 u16 qid_in_pf = 0; 351 u8 resp_data[2]; 352 int ret; 353 354 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); 355 memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 356 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, 357 sizeof(resp_data)); 358 if (!ret) 359 qid_in_pf = *(u16 *)resp_data; 360 361 return qid_in_pf; 362 } 363 364 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 365 { 366 struct hclge_vf_to_pf_msg send_msg; 367 u8 resp_msg[2]; 368 int ret; 369 370 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); 371 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 372 sizeof(resp_msg)); 373 if (ret) { 374 dev_err(&hdev->pdev->dev, 375 "VF request to get the pf port media type failed %d", 376 ret); 377 return ret; 378 } 379 380 hdev->hw.mac.media_type = resp_msg[0]; 381 hdev->hw.mac.module_type = resp_msg[1]; 382 383 return 0; 384 } 385 386 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 387 { 388 struct hclgevf_tqp *tqp; 389 int i; 390 391 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 392 sizeof(struct hclgevf_tqp), GFP_KERNEL); 393 if (!hdev->htqp) 394 return -ENOMEM; 395 396 tqp = hdev->htqp; 397 398 for (i = 0; i < hdev->num_tqps; i++) { 399 tqp->dev = &hdev->pdev->dev; 400 tqp->index = i; 401 402 tqp->q.ae_algo = &ae_algovf; 403 tqp->q.buf_size = hdev->rx_buf_len; 404 tqp->q.tx_desc_num = hdev->num_tx_desc; 405 tqp->q.rx_desc_num = hdev->num_rx_desc; 406 407 /* need an extended offset to configure queues >= 408 * HCLGEVF_TQP_MAX_SIZE_DEV_V2. 409 */ 410 if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) 411 tqp->q.io_base = hdev->hw.io_base + 412 HCLGEVF_TQP_REG_OFFSET + 413 i * HCLGEVF_TQP_REG_SIZE; 414 else 415 tqp->q.io_base = hdev->hw.io_base + 416 HCLGEVF_TQP_REG_OFFSET + 417 HCLGEVF_TQP_EXT_REG_OFFSET + 418 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * 419 HCLGEVF_TQP_REG_SIZE; 420 421 tqp++; 422 } 423 424 return 0; 425 } 426 427 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 428 { 429 struct hnae3_handle *nic = &hdev->nic; 430 struct hnae3_knic_private_info *kinfo; 431 u16 new_tqps = hdev->num_tqps; 432 unsigned int i; 433 434 kinfo = &nic->kinfo; 435 kinfo->num_tc = 0; 436 kinfo->num_tx_desc = hdev->num_tx_desc; 437 kinfo->num_rx_desc = hdev->num_rx_desc; 438 kinfo->rx_buf_len = hdev->rx_buf_len; 439 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 440 if (hdev->hw_tc_map & BIT(i)) 441 kinfo->num_tc++; 442 443 kinfo->rss_size 444 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 445 new_tqps = kinfo->rss_size * kinfo->num_tc; 446 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 447 448 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 449 sizeof(struct hnae3_queue *), GFP_KERNEL); 450 if (!kinfo->tqp) 451 return -ENOMEM; 452 453 for (i = 0; i < kinfo->num_tqps; i++) { 454 hdev->htqp[i].q.handle = &hdev->nic; 455 hdev->htqp[i].q.tqp_index = i; 456 kinfo->tqp[i] = &hdev->htqp[i].q; 457 } 458 459 /* after init the max rss_size and tqps, adjust the default tqp numbers 460 * and rss size with the actual vector numbers 461 */ 462 kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 463 kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc, 464 kinfo->rss_size); 465 466 return 0; 467 } 468 469 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 470 { 471 struct hclge_vf_to_pf_msg send_msg; 472 int status; 473 474 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); 475 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 476 if (status) 477 dev_err(&hdev->pdev->dev, 478 "VF failed to fetch link status(%d) from PF", status); 479 } 480 481 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 482 { 483 struct hnae3_handle *rhandle = &hdev->roce; 484 struct hnae3_handle *handle = &hdev->nic; 485 struct hnae3_client *rclient; 486 struct hnae3_client *client; 487 488 if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 489 return; 490 491 client = handle->client; 492 rclient = hdev->roce_client; 493 494 link_state = 495 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 496 497 if (link_state != hdev->hw.mac.link) { 498 client->ops->link_status_change(handle, !!link_state); 499 if (rclient && rclient->ops->link_status_change) 500 rclient->ops->link_status_change(rhandle, !!link_state); 501 hdev->hw.mac.link = link_state; 502 } 503 504 clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 505 } 506 507 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 508 { 509 #define HCLGEVF_ADVERTISING 0 510 #define HCLGEVF_SUPPORTED 1 511 512 struct hclge_vf_to_pf_msg send_msg; 513 514 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 515 send_msg.data[0] = HCLGEVF_ADVERTISING; 516 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 517 send_msg.data[0] = HCLGEVF_SUPPORTED; 518 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 519 } 520 521 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 522 { 523 struct hnae3_handle *nic = &hdev->nic; 524 int ret; 525 526 nic->ae_algo = &ae_algovf; 527 nic->pdev = hdev->pdev; 528 nic->numa_node_mask = hdev->numa_node_mask; 529 nic->flags |= HNAE3_SUPPORT_VF; 530 531 ret = hclgevf_knic_setup(hdev); 532 if (ret) 533 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 534 ret); 535 return ret; 536 } 537 538 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 539 { 540 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 541 dev_warn(&hdev->pdev->dev, 542 "vector(vector_id %d) has been freed.\n", vector_id); 543 return; 544 } 545 546 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 547 hdev->num_msi_left += 1; 548 hdev->num_msi_used -= 1; 549 } 550 551 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 552 struct hnae3_vector_info *vector_info) 553 { 554 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 555 struct hnae3_vector_info *vector = vector_info; 556 int alloc = 0; 557 int i, j; 558 559 vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 560 vector_num = min(hdev->num_msi_left, vector_num); 561 562 for (j = 0; j < vector_num; j++) { 563 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 564 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 565 vector->vector = pci_irq_vector(hdev->pdev, i); 566 vector->io_addr = hdev->hw.io_base + 567 HCLGEVF_VECTOR_REG_BASE + 568 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 569 hdev->vector_status[i] = 0; 570 hdev->vector_irq[i] = vector->vector; 571 572 vector++; 573 alloc++; 574 575 break; 576 } 577 } 578 } 579 hdev->num_msi_left -= alloc; 580 hdev->num_msi_used += alloc; 581 582 return alloc; 583 } 584 585 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 586 { 587 int i; 588 589 for (i = 0; i < hdev->num_msi; i++) 590 if (vector == hdev->vector_irq[i]) 591 return i; 592 593 return -EINVAL; 594 } 595 596 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 597 const u8 hfunc, const u8 *key) 598 { 599 struct hclgevf_rss_config_cmd *req; 600 unsigned int key_offset = 0; 601 struct hclgevf_desc desc; 602 int key_counts; 603 int key_size; 604 int ret; 605 606 key_counts = HCLGEVF_RSS_KEY_SIZE; 607 req = (struct hclgevf_rss_config_cmd *)desc.data; 608 609 while (key_counts) { 610 hclgevf_cmd_setup_basic_desc(&desc, 611 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 612 false); 613 614 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 615 req->hash_config |= 616 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 617 618 key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); 619 memcpy(req->hash_key, 620 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 621 622 key_counts -= key_size; 623 key_offset++; 624 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 625 if (ret) { 626 dev_err(&hdev->pdev->dev, 627 "Configure RSS config fail, status = %d\n", 628 ret); 629 return ret; 630 } 631 } 632 633 return 0; 634 } 635 636 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 637 { 638 return HCLGEVF_RSS_KEY_SIZE; 639 } 640 641 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 642 { 643 return HCLGEVF_RSS_IND_TBL_SIZE; 644 } 645 646 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 647 { 648 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 649 struct hclgevf_rss_indirection_table_cmd *req; 650 struct hclgevf_desc desc; 651 int status; 652 int i, j; 653 654 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 655 656 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 657 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 658 false); 659 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 660 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 661 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 662 req->rss_result[j] = 663 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 664 665 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 666 if (status) { 667 dev_err(&hdev->pdev->dev, 668 "VF failed(=%d) to set RSS indirection table\n", 669 status); 670 return status; 671 } 672 } 673 674 return 0; 675 } 676 677 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 678 { 679 struct hclgevf_rss_tc_mode_cmd *req; 680 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 681 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 682 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 683 struct hclgevf_desc desc; 684 u16 roundup_size; 685 unsigned int i; 686 int status; 687 688 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 689 690 roundup_size = roundup_pow_of_two(rss_size); 691 roundup_size = ilog2(roundup_size); 692 693 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 694 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 695 tc_size[i] = roundup_size; 696 tc_offset[i] = rss_size * i; 697 } 698 699 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 700 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 701 hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 702 (tc_valid[i] & 0x1)); 703 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 704 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 705 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 706 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 707 } 708 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 709 if (status) 710 dev_err(&hdev->pdev->dev, 711 "VF failed(=%d) to set rss tc mode\n", status); 712 713 return status; 714 } 715 716 /* for revision 0x20, vf shared the same rss config with pf */ 717 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 718 { 719 #define HCLGEVF_RSS_MBX_RESP_LEN 8 720 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 721 u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 722 struct hclge_vf_to_pf_msg send_msg; 723 u16 msg_num, hash_key_index; 724 u8 index; 725 int ret; 726 727 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 728 msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 729 HCLGEVF_RSS_MBX_RESP_LEN; 730 for (index = 0; index < msg_num; index++) { 731 send_msg.data[0] = index; 732 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 733 HCLGEVF_RSS_MBX_RESP_LEN); 734 if (ret) { 735 dev_err(&hdev->pdev->dev, 736 "VF get rss hash key from PF failed, ret=%d", 737 ret); 738 return ret; 739 } 740 741 hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 742 if (index == msg_num - 1) 743 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 744 &resp_msg[0], 745 HCLGEVF_RSS_KEY_SIZE - hash_key_index); 746 else 747 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 748 &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 749 } 750 751 return 0; 752 } 753 754 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 755 u8 *hfunc) 756 { 757 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 758 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 759 int i, ret; 760 761 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 762 /* Get hash algorithm */ 763 if (hfunc) { 764 switch (rss_cfg->hash_algo) { 765 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 766 *hfunc = ETH_RSS_HASH_TOP; 767 break; 768 case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 769 *hfunc = ETH_RSS_HASH_XOR; 770 break; 771 default: 772 *hfunc = ETH_RSS_HASH_UNKNOWN; 773 break; 774 } 775 } 776 777 /* Get the RSS Key required by the user */ 778 if (key) 779 memcpy(key, rss_cfg->rss_hash_key, 780 HCLGEVF_RSS_KEY_SIZE); 781 } else { 782 if (hfunc) 783 *hfunc = ETH_RSS_HASH_TOP; 784 if (key) { 785 ret = hclgevf_get_rss_hash_key(hdev); 786 if (ret) 787 return ret; 788 memcpy(key, rss_cfg->rss_hash_key, 789 HCLGEVF_RSS_KEY_SIZE); 790 } 791 } 792 793 if (indir) 794 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 795 indir[i] = rss_cfg->rss_indirection_tbl[i]; 796 797 return 0; 798 } 799 800 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 801 const u8 *key, const u8 hfunc) 802 { 803 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 804 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 805 int ret, i; 806 807 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 808 /* Set the RSS Hash Key if specififed by the user */ 809 if (key) { 810 switch (hfunc) { 811 case ETH_RSS_HASH_TOP: 812 rss_cfg->hash_algo = 813 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 814 break; 815 case ETH_RSS_HASH_XOR: 816 rss_cfg->hash_algo = 817 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 818 break; 819 case ETH_RSS_HASH_NO_CHANGE: 820 break; 821 default: 822 return -EINVAL; 823 } 824 825 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 826 key); 827 if (ret) 828 return ret; 829 830 /* Update the shadow RSS key with user specified qids */ 831 memcpy(rss_cfg->rss_hash_key, key, 832 HCLGEVF_RSS_KEY_SIZE); 833 } 834 } 835 836 /* update the shadow RSS table with user specified qids */ 837 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 838 rss_cfg->rss_indirection_tbl[i] = indir[i]; 839 840 /* update the hardware */ 841 return hclgevf_set_rss_indir_table(hdev); 842 } 843 844 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 845 { 846 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 847 848 if (nfc->data & RXH_L4_B_2_3) 849 hash_sets |= HCLGEVF_D_PORT_BIT; 850 else 851 hash_sets &= ~HCLGEVF_D_PORT_BIT; 852 853 if (nfc->data & RXH_IP_SRC) 854 hash_sets |= HCLGEVF_S_IP_BIT; 855 else 856 hash_sets &= ~HCLGEVF_S_IP_BIT; 857 858 if (nfc->data & RXH_IP_DST) 859 hash_sets |= HCLGEVF_D_IP_BIT; 860 else 861 hash_sets &= ~HCLGEVF_D_IP_BIT; 862 863 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 864 hash_sets |= HCLGEVF_V_TAG_BIT; 865 866 return hash_sets; 867 } 868 869 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 870 struct ethtool_rxnfc *nfc) 871 { 872 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 873 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 874 struct hclgevf_rss_input_tuple_cmd *req; 875 struct hclgevf_desc desc; 876 u8 tuple_sets; 877 int ret; 878 879 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 880 return -EOPNOTSUPP; 881 882 if (nfc->data & 883 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 884 return -EINVAL; 885 886 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 887 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 888 889 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 890 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 891 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 892 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 893 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 894 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 895 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 896 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 897 898 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 899 switch (nfc->flow_type) { 900 case TCP_V4_FLOW: 901 req->ipv4_tcp_en = tuple_sets; 902 break; 903 case TCP_V6_FLOW: 904 req->ipv6_tcp_en = tuple_sets; 905 break; 906 case UDP_V4_FLOW: 907 req->ipv4_udp_en = tuple_sets; 908 break; 909 case UDP_V6_FLOW: 910 req->ipv6_udp_en = tuple_sets; 911 break; 912 case SCTP_V4_FLOW: 913 req->ipv4_sctp_en = tuple_sets; 914 break; 915 case SCTP_V6_FLOW: 916 if ((nfc->data & RXH_L4_B_0_1) || 917 (nfc->data & RXH_L4_B_2_3)) 918 return -EINVAL; 919 920 req->ipv6_sctp_en = tuple_sets; 921 break; 922 case IPV4_FLOW: 923 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 924 break; 925 case IPV6_FLOW: 926 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 927 break; 928 default: 929 return -EINVAL; 930 } 931 932 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 933 if (ret) { 934 dev_err(&hdev->pdev->dev, 935 "Set rss tuple fail, status = %d\n", ret); 936 return ret; 937 } 938 939 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 940 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 941 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 942 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 943 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 944 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 945 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 946 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 947 return 0; 948 } 949 950 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 951 struct ethtool_rxnfc *nfc) 952 { 953 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 954 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 955 u8 tuple_sets; 956 957 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 958 return -EOPNOTSUPP; 959 960 nfc->data = 0; 961 962 switch (nfc->flow_type) { 963 case TCP_V4_FLOW: 964 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 965 break; 966 case UDP_V4_FLOW: 967 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 968 break; 969 case TCP_V6_FLOW: 970 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 971 break; 972 case UDP_V6_FLOW: 973 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 974 break; 975 case SCTP_V4_FLOW: 976 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 977 break; 978 case SCTP_V6_FLOW: 979 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 980 break; 981 case IPV4_FLOW: 982 case IPV6_FLOW: 983 tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 984 break; 985 default: 986 return -EINVAL; 987 } 988 989 if (!tuple_sets) 990 return 0; 991 992 if (tuple_sets & HCLGEVF_D_PORT_BIT) 993 nfc->data |= RXH_L4_B_2_3; 994 if (tuple_sets & HCLGEVF_S_PORT_BIT) 995 nfc->data |= RXH_L4_B_0_1; 996 if (tuple_sets & HCLGEVF_D_IP_BIT) 997 nfc->data |= RXH_IP_DST; 998 if (tuple_sets & HCLGEVF_S_IP_BIT) 999 nfc->data |= RXH_IP_SRC; 1000 1001 return 0; 1002 } 1003 1004 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 1005 struct hclgevf_rss_cfg *rss_cfg) 1006 { 1007 struct hclgevf_rss_input_tuple_cmd *req; 1008 struct hclgevf_desc desc; 1009 int ret; 1010 1011 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 1012 1013 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 1014 1015 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 1016 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 1017 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 1018 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 1019 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 1020 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 1021 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 1022 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 1023 1024 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1025 if (ret) 1026 dev_err(&hdev->pdev->dev, 1027 "Configure rss input fail, status = %d\n", ret); 1028 return ret; 1029 } 1030 1031 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 1032 { 1033 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1034 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1035 1036 return rss_cfg->rss_size; 1037 } 1038 1039 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 1040 int vector_id, 1041 struct hnae3_ring_chain_node *ring_chain) 1042 { 1043 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1044 struct hclge_vf_to_pf_msg send_msg; 1045 struct hnae3_ring_chain_node *node; 1046 int status; 1047 int i = 0; 1048 1049 memset(&send_msg, 0, sizeof(send_msg)); 1050 send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 1051 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 1052 send_msg.vector_id = vector_id; 1053 1054 for (node = ring_chain; node; node = node->next) { 1055 send_msg.param[i].ring_type = 1056 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 1057 1058 send_msg.param[i].tqp_index = node->tqp_index; 1059 send_msg.param[i].int_gl_index = 1060 hnae3_get_field(node->int_gl_idx, 1061 HNAE3_RING_GL_IDX_M, 1062 HNAE3_RING_GL_IDX_S); 1063 1064 i++; 1065 if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 1066 send_msg.ring_num = i; 1067 1068 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 1069 NULL, 0); 1070 if (status) { 1071 dev_err(&hdev->pdev->dev, 1072 "Map TQP fail, status is %d.\n", 1073 status); 1074 return status; 1075 } 1076 i = 0; 1077 } 1078 } 1079 1080 return 0; 1081 } 1082 1083 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1084 struct hnae3_ring_chain_node *ring_chain) 1085 { 1086 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1087 int vector_id; 1088 1089 vector_id = hclgevf_get_vector_index(hdev, vector); 1090 if (vector_id < 0) { 1091 dev_err(&handle->pdev->dev, 1092 "Get vector index fail. ret =%d\n", vector_id); 1093 return vector_id; 1094 } 1095 1096 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1097 } 1098 1099 static int hclgevf_unmap_ring_from_vector( 1100 struct hnae3_handle *handle, 1101 int vector, 1102 struct hnae3_ring_chain_node *ring_chain) 1103 { 1104 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1105 int ret, vector_id; 1106 1107 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1108 return 0; 1109 1110 vector_id = hclgevf_get_vector_index(hdev, vector); 1111 if (vector_id < 0) { 1112 dev_err(&handle->pdev->dev, 1113 "Get vector index fail. ret =%d\n", vector_id); 1114 return vector_id; 1115 } 1116 1117 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 1118 if (ret) 1119 dev_err(&handle->pdev->dev, 1120 "Unmap ring from vector fail. vector=%d, ret =%d\n", 1121 vector_id, 1122 ret); 1123 1124 return ret; 1125 } 1126 1127 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 1128 { 1129 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1130 int vector_id; 1131 1132 vector_id = hclgevf_get_vector_index(hdev, vector); 1133 if (vector_id < 0) { 1134 dev_err(&handle->pdev->dev, 1135 "hclgevf_put_vector get vector index fail. ret =%d\n", 1136 vector_id); 1137 return vector_id; 1138 } 1139 1140 hclgevf_free_vector(hdev, vector_id); 1141 1142 return 0; 1143 } 1144 1145 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1146 bool en_uc_pmc, bool en_mc_pmc, 1147 bool en_bc_pmc) 1148 { 1149 struct hclge_vf_to_pf_msg send_msg; 1150 int ret; 1151 1152 memset(&send_msg, 0, sizeof(send_msg)); 1153 send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 1154 send_msg.en_bc = en_bc_pmc ? 1 : 0; 1155 send_msg.en_uc = en_uc_pmc ? 1 : 0; 1156 send_msg.en_mc = en_mc_pmc ? 1 : 0; 1157 1158 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1159 if (ret) 1160 dev_err(&hdev->pdev->dev, 1161 "Set promisc mode fail, status is %d.\n", ret); 1162 1163 return ret; 1164 } 1165 1166 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 1167 bool en_mc_pmc) 1168 { 1169 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1170 bool en_bc_pmc; 1171 1172 en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; 1173 1174 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 1175 en_bc_pmc); 1176 } 1177 1178 static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 1179 { 1180 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1181 1182 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1183 } 1184 1185 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 1186 { 1187 struct hnae3_handle *handle = &hdev->nic; 1188 bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 1189 bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 1190 int ret; 1191 1192 if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 1193 ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 1194 if (!ret) 1195 clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1196 } 1197 } 1198 1199 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, 1200 int stream_id, bool enable) 1201 { 1202 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1203 struct hclgevf_desc desc; 1204 int status; 1205 1206 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1207 1208 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1209 false); 1210 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1211 req->stream_id = cpu_to_le16(stream_id); 1212 if (enable) 1213 req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 1214 1215 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1216 if (status) 1217 dev_err(&hdev->pdev->dev, 1218 "TQP enable fail, status =%d.\n", status); 1219 1220 return status; 1221 } 1222 1223 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1224 { 1225 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1226 struct hclgevf_tqp *tqp; 1227 int i; 1228 1229 for (i = 0; i < kinfo->num_tqps; i++) { 1230 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1231 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1232 } 1233 } 1234 1235 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 1236 { 1237 struct hclge_vf_to_pf_msg send_msg; 1238 u8 host_mac[ETH_ALEN]; 1239 int status; 1240 1241 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 1242 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 1243 ETH_ALEN); 1244 if (status) { 1245 dev_err(&hdev->pdev->dev, 1246 "fail to get VF MAC from host %d", status); 1247 return status; 1248 } 1249 1250 ether_addr_copy(p, host_mac); 1251 1252 return 0; 1253 } 1254 1255 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1256 { 1257 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1258 u8 host_mac_addr[ETH_ALEN]; 1259 1260 if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 1261 return; 1262 1263 hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 1264 if (hdev->has_pf_mac) 1265 ether_addr_copy(p, host_mac_addr); 1266 else 1267 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1268 } 1269 1270 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1271 bool is_first) 1272 { 1273 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1274 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1275 struct hclge_vf_to_pf_msg send_msg; 1276 u8 *new_mac_addr = (u8 *)p; 1277 int status; 1278 1279 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 1280 send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1281 ether_addr_copy(send_msg.data, new_mac_addr); 1282 if (is_first && !hdev->has_pf_mac) 1283 eth_zero_addr(&send_msg.data[ETH_ALEN]); 1284 else 1285 ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 1286 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1287 if (!status) 1288 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1289 1290 return status; 1291 } 1292 1293 static struct hclgevf_mac_addr_node * 1294 hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 1295 { 1296 struct hclgevf_mac_addr_node *mac_node, *tmp; 1297 1298 list_for_each_entry_safe(mac_node, tmp, list, node) 1299 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 1300 return mac_node; 1301 1302 return NULL; 1303 } 1304 1305 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 1306 enum HCLGEVF_MAC_NODE_STATE state) 1307 { 1308 switch (state) { 1309 /* from set_rx_mode or tmp_add_list */ 1310 case HCLGEVF_MAC_TO_ADD: 1311 if (mac_node->state == HCLGEVF_MAC_TO_DEL) 1312 mac_node->state = HCLGEVF_MAC_ACTIVE; 1313 break; 1314 /* only from set_rx_mode */ 1315 case HCLGEVF_MAC_TO_DEL: 1316 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1317 list_del(&mac_node->node); 1318 kfree(mac_node); 1319 } else { 1320 mac_node->state = HCLGEVF_MAC_TO_DEL; 1321 } 1322 break; 1323 /* only from tmp_add_list, the mac_node->state won't be 1324 * HCLGEVF_MAC_ACTIVE 1325 */ 1326 case HCLGEVF_MAC_ACTIVE: 1327 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1328 mac_node->state = HCLGEVF_MAC_ACTIVE; 1329 break; 1330 } 1331 } 1332 1333 static int hclgevf_update_mac_list(struct hnae3_handle *handle, 1334 enum HCLGEVF_MAC_NODE_STATE state, 1335 enum HCLGEVF_MAC_ADDR_TYPE mac_type, 1336 const unsigned char *addr) 1337 { 1338 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1339 struct hclgevf_mac_addr_node *mac_node; 1340 struct list_head *list; 1341 1342 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1343 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1344 1345 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1346 1347 /* if the mac addr is already in the mac list, no need to add a new 1348 * one into it, just check the mac addr state, convert it to a new 1349 * new state, or just remove it, or do nothing. 1350 */ 1351 mac_node = hclgevf_find_mac_node(list, addr); 1352 if (mac_node) { 1353 hclgevf_update_mac_node(mac_node, state); 1354 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1355 return 0; 1356 } 1357 /* if this address is never added, unnecessary to delete */ 1358 if (state == HCLGEVF_MAC_TO_DEL) { 1359 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1360 return -ENOENT; 1361 } 1362 1363 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 1364 if (!mac_node) { 1365 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1366 return -ENOMEM; 1367 } 1368 1369 mac_node->state = state; 1370 ether_addr_copy(mac_node->mac_addr, addr); 1371 list_add_tail(&mac_node->node, list); 1372 1373 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1374 return 0; 1375 } 1376 1377 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1378 const unsigned char *addr) 1379 { 1380 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1381 HCLGEVF_MAC_ADDR_UC, addr); 1382 } 1383 1384 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1385 const unsigned char *addr) 1386 { 1387 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1388 HCLGEVF_MAC_ADDR_UC, addr); 1389 } 1390 1391 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1392 const unsigned char *addr) 1393 { 1394 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1395 HCLGEVF_MAC_ADDR_MC, addr); 1396 } 1397 1398 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1399 const unsigned char *addr) 1400 { 1401 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1402 HCLGEVF_MAC_ADDR_MC, addr); 1403 } 1404 1405 static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, 1406 struct hclgevf_mac_addr_node *mac_node, 1407 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1408 { 1409 struct hclge_vf_to_pf_msg send_msg; 1410 u8 code, subcode; 1411 1412 if (mac_type == HCLGEVF_MAC_ADDR_UC) { 1413 code = HCLGE_MBX_SET_UNICAST; 1414 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1415 subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; 1416 else 1417 subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; 1418 } else { 1419 code = HCLGE_MBX_SET_MULTICAST; 1420 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1421 subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; 1422 else 1423 subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; 1424 } 1425 1426 hclgevf_build_send_msg(&send_msg, code, subcode); 1427 ether_addr_copy(send_msg.data, mac_node->mac_addr); 1428 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1429 } 1430 1431 static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, 1432 struct list_head *list, 1433 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1434 { 1435 struct hclgevf_mac_addr_node *mac_node, *tmp; 1436 int ret; 1437 1438 list_for_each_entry_safe(mac_node, tmp, list, node) { 1439 ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); 1440 if (ret) { 1441 dev_err(&hdev->pdev->dev, 1442 "failed to configure mac %pM, state = %d, ret = %d\n", 1443 mac_node->mac_addr, mac_node->state, ret); 1444 return; 1445 } 1446 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1447 mac_node->state = HCLGEVF_MAC_ACTIVE; 1448 } else { 1449 list_del(&mac_node->node); 1450 kfree(mac_node); 1451 } 1452 } 1453 } 1454 1455 static void hclgevf_sync_from_add_list(struct list_head *add_list, 1456 struct list_head *mac_list) 1457 { 1458 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1459 1460 list_for_each_entry_safe(mac_node, tmp, add_list, node) { 1461 /* if the mac address from tmp_add_list is not in the 1462 * uc/mc_mac_list, it means have received a TO_DEL request 1463 * during the time window of sending mac config request to PF 1464 * If mac_node state is ACTIVE, then change its state to TO_DEL, 1465 * then it will be removed at next time. If is TO_ADD, it means 1466 * send TO_ADD request failed, so just remove the mac node. 1467 */ 1468 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1469 if (new_node) { 1470 hclgevf_update_mac_node(new_node, mac_node->state); 1471 list_del(&mac_node->node); 1472 kfree(mac_node); 1473 } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { 1474 mac_node->state = HCLGEVF_MAC_TO_DEL; 1475 list_del(&mac_node->node); 1476 list_add_tail(&mac_node->node, mac_list); 1477 } else { 1478 list_del(&mac_node->node); 1479 kfree(mac_node); 1480 } 1481 } 1482 } 1483 1484 static void hclgevf_sync_from_del_list(struct list_head *del_list, 1485 struct list_head *mac_list) 1486 { 1487 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1488 1489 list_for_each_entry_safe(mac_node, tmp, del_list, node) { 1490 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1491 if (new_node) { 1492 /* If the mac addr is exist in the mac list, it means 1493 * received a new request TO_ADD during the time window 1494 * of sending mac addr configurrequest to PF, so just 1495 * change the mac state to ACTIVE. 1496 */ 1497 new_node->state = HCLGEVF_MAC_ACTIVE; 1498 list_del(&mac_node->node); 1499 kfree(mac_node); 1500 } else { 1501 list_del(&mac_node->node); 1502 list_add_tail(&mac_node->node, mac_list); 1503 } 1504 } 1505 } 1506 1507 static void hclgevf_clear_list(struct list_head *list) 1508 { 1509 struct hclgevf_mac_addr_node *mac_node, *tmp; 1510 1511 list_for_each_entry_safe(mac_node, tmp, list, node) { 1512 list_del(&mac_node->node); 1513 kfree(mac_node); 1514 } 1515 } 1516 1517 static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, 1518 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1519 { 1520 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1521 struct list_head tmp_add_list, tmp_del_list; 1522 struct list_head *list; 1523 1524 INIT_LIST_HEAD(&tmp_add_list); 1525 INIT_LIST_HEAD(&tmp_del_list); 1526 1527 /* move the mac addr to the tmp_add_list and tmp_del_list, then 1528 * we can add/delete these mac addr outside the spin lock 1529 */ 1530 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1531 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1532 1533 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1534 1535 list_for_each_entry_safe(mac_node, tmp, list, node) { 1536 switch (mac_node->state) { 1537 case HCLGEVF_MAC_TO_DEL: 1538 list_del(&mac_node->node); 1539 list_add_tail(&mac_node->node, &tmp_del_list); 1540 break; 1541 case HCLGEVF_MAC_TO_ADD: 1542 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 1543 if (!new_node) 1544 goto stop_traverse; 1545 1546 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 1547 new_node->state = mac_node->state; 1548 list_add_tail(&new_node->node, &tmp_add_list); 1549 break; 1550 default: 1551 break; 1552 } 1553 } 1554 1555 stop_traverse: 1556 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1557 1558 /* delete first, in order to get max mac table space for adding */ 1559 hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); 1560 hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); 1561 1562 /* if some mac addresses were added/deleted fail, move back to the 1563 * mac_list, and retry at next time. 1564 */ 1565 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1566 1567 hclgevf_sync_from_del_list(&tmp_del_list, list); 1568 hclgevf_sync_from_add_list(&tmp_add_list, list); 1569 1570 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1571 } 1572 1573 static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) 1574 { 1575 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); 1576 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); 1577 } 1578 1579 static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) 1580 { 1581 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1582 1583 hclgevf_clear_list(&hdev->mac_table.uc_mac_list); 1584 hclgevf_clear_list(&hdev->mac_table.mc_mac_list); 1585 1586 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1587 } 1588 1589 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1590 __be16 proto, u16 vlan_id, 1591 bool is_kill) 1592 { 1593 #define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 1594 #define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 1595 #define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 1596 1597 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1598 struct hclge_vf_to_pf_msg send_msg; 1599 int ret; 1600 1601 if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1602 return -EINVAL; 1603 1604 if (proto != htons(ETH_P_8021Q)) 1605 return -EPROTONOSUPPORT; 1606 1607 /* When device is resetting or reset failed, firmware is unable to 1608 * handle mailbox. Just record the vlan id, and remove it after 1609 * reset finished. 1610 */ 1611 if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 1612 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { 1613 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1614 return -EBUSY; 1615 } 1616 1617 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1618 HCLGE_MBX_VLAN_FILTER); 1619 send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; 1620 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, 1621 sizeof(vlan_id)); 1622 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, 1623 sizeof(proto)); 1624 /* when remove hw vlan filter failed, record the vlan id, 1625 * and try to remove it from hw later, to be consistence 1626 * with stack. 1627 */ 1628 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1629 if (is_kill && ret) 1630 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1631 1632 return ret; 1633 } 1634 1635 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1636 { 1637 #define HCLGEVF_MAX_SYNC_COUNT 60 1638 struct hnae3_handle *handle = &hdev->nic; 1639 int ret, sync_cnt = 0; 1640 u16 vlan_id; 1641 1642 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1643 while (vlan_id != VLAN_N_VID) { 1644 ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1645 vlan_id, true); 1646 if (ret) 1647 return; 1648 1649 clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1650 sync_cnt++; 1651 if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1652 return; 1653 1654 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1655 } 1656 } 1657 1658 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1659 { 1660 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1661 struct hclge_vf_to_pf_msg send_msg; 1662 1663 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1664 HCLGE_MBX_VLAN_RX_OFF_CFG); 1665 send_msg.data[0] = enable ? 1 : 0; 1666 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1667 } 1668 1669 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1670 { 1671 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1672 struct hclge_vf_to_pf_msg send_msg; 1673 int ret; 1674 1675 /* disable vf queue before send queue reset msg to PF */ 1676 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 1677 if (ret) 1678 return ret; 1679 1680 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1681 memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 1682 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1683 } 1684 1685 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1686 { 1687 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1688 struct hclge_vf_to_pf_msg send_msg; 1689 1690 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); 1691 memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); 1692 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1693 } 1694 1695 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1696 enum hnae3_reset_notify_type type) 1697 { 1698 struct hnae3_client *client = hdev->nic_client; 1699 struct hnae3_handle *handle = &hdev->nic; 1700 int ret; 1701 1702 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 1703 !client) 1704 return 0; 1705 1706 if (!client->ops->reset_notify) 1707 return -EOPNOTSUPP; 1708 1709 ret = client->ops->reset_notify(handle, type); 1710 if (ret) 1711 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1712 type, ret); 1713 1714 return ret; 1715 } 1716 1717 static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, 1718 enum hnae3_reset_notify_type type) 1719 { 1720 struct hnae3_client *client = hdev->roce_client; 1721 struct hnae3_handle *handle = &hdev->roce; 1722 int ret; 1723 1724 if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) 1725 return 0; 1726 1727 if (!client->ops->reset_notify) 1728 return -EOPNOTSUPP; 1729 1730 ret = client->ops->reset_notify(handle, type); 1731 if (ret) 1732 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 1733 type, ret); 1734 return ret; 1735 } 1736 1737 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1738 { 1739 #define HCLGEVF_RESET_WAIT_US 20000 1740 #define HCLGEVF_RESET_WAIT_CNT 2000 1741 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1742 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1743 1744 u32 val; 1745 int ret; 1746 1747 if (hdev->reset_type == HNAE3_VF_RESET) 1748 ret = readl_poll_timeout(hdev->hw.io_base + 1749 HCLGEVF_VF_RST_ING, val, 1750 !(val & HCLGEVF_VF_RST_ING_BIT), 1751 HCLGEVF_RESET_WAIT_US, 1752 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1753 else 1754 ret = readl_poll_timeout(hdev->hw.io_base + 1755 HCLGEVF_RST_ING, val, 1756 !(val & HCLGEVF_RST_ING_BITS), 1757 HCLGEVF_RESET_WAIT_US, 1758 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1759 1760 /* hardware completion status should be available by this time */ 1761 if (ret) { 1762 dev_err(&hdev->pdev->dev, 1763 "couldn't get reset done status from h/w, timeout!\n"); 1764 return ret; 1765 } 1766 1767 /* we will wait a bit more to let reset of the stack to complete. This 1768 * might happen in case reset assertion was made by PF. Yes, this also 1769 * means we might end up waiting bit more even for VF reset. 1770 */ 1771 msleep(5000); 1772 1773 return 0; 1774 } 1775 1776 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 1777 { 1778 u32 reg_val; 1779 1780 reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); 1781 if (enable) 1782 reg_val |= HCLGEVF_NIC_SW_RST_RDY; 1783 else 1784 reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 1785 1786 hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 1787 reg_val); 1788 } 1789 1790 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1791 { 1792 int ret; 1793 1794 /* uninitialize the nic client */ 1795 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1796 if (ret) 1797 return ret; 1798 1799 /* re-initialize the hclge device */ 1800 ret = hclgevf_reset_hdev(hdev); 1801 if (ret) { 1802 dev_err(&hdev->pdev->dev, 1803 "hclge device re-init failed, VF is disabled!\n"); 1804 return ret; 1805 } 1806 1807 /* bring up the nic client again */ 1808 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1809 if (ret) 1810 return ret; 1811 1812 /* clear handshake status with IMP */ 1813 hclgevf_reset_handshake(hdev, false); 1814 1815 /* bring up the nic to enable TX/RX again */ 1816 return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1817 } 1818 1819 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1820 { 1821 #define HCLGEVF_RESET_SYNC_TIME 100 1822 1823 if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1824 struct hclge_vf_to_pf_msg send_msg; 1825 int ret; 1826 1827 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); 1828 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1829 if (ret) { 1830 dev_err(&hdev->pdev->dev, 1831 "failed to assert VF reset, ret = %d\n", ret); 1832 return ret; 1833 } 1834 hdev->rst_stats.vf_func_rst_cnt++; 1835 } 1836 1837 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1838 /* inform hardware that preparatory work is done */ 1839 msleep(HCLGEVF_RESET_SYNC_TIME); 1840 hclgevf_reset_handshake(hdev, true); 1841 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", 1842 hdev->reset_type); 1843 1844 return 0; 1845 } 1846 1847 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 1848 { 1849 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 1850 hdev->rst_stats.vf_func_rst_cnt); 1851 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1852 hdev->rst_stats.flr_rst_cnt); 1853 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 1854 hdev->rst_stats.vf_rst_cnt); 1855 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1856 hdev->rst_stats.rst_done_cnt); 1857 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1858 hdev->rst_stats.hw_rst_done_cnt); 1859 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1860 hdev->rst_stats.rst_cnt); 1861 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1862 hdev->rst_stats.rst_fail_cnt); 1863 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1864 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 1865 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 1866 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG)); 1867 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1868 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); 1869 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1870 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 1871 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1872 } 1873 1874 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1875 { 1876 /* recover handshake status with IMP when reset fail */ 1877 hclgevf_reset_handshake(hdev, true); 1878 hdev->rst_stats.rst_fail_cnt++; 1879 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1880 hdev->rst_stats.rst_fail_cnt); 1881 1882 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1883 set_bit(hdev->reset_type, &hdev->reset_pending); 1884 1885 if (hclgevf_is_reset_pending(hdev)) { 1886 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1887 hclgevf_reset_task_schedule(hdev); 1888 } else { 1889 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1890 hclgevf_dump_rst_info(hdev); 1891 } 1892 } 1893 1894 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 1895 { 1896 int ret; 1897 1898 hdev->rst_stats.rst_cnt++; 1899 1900 /* perform reset of the stack & ae device for a client */ 1901 ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 1902 if (ret) 1903 return ret; 1904 1905 rtnl_lock(); 1906 /* bring down the nic to stop any ongoing TX/RX */ 1907 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1908 rtnl_unlock(); 1909 if (ret) 1910 return ret; 1911 1912 return hclgevf_reset_prepare_wait(hdev); 1913 } 1914 1915 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 1916 { 1917 int ret; 1918 1919 hdev->rst_stats.hw_rst_done_cnt++; 1920 ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 1921 if (ret) 1922 return ret; 1923 1924 rtnl_lock(); 1925 /* now, re-initialize the nic client and ae device */ 1926 ret = hclgevf_reset_stack(hdev); 1927 rtnl_unlock(); 1928 if (ret) { 1929 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1930 return ret; 1931 } 1932 1933 ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 1934 /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1 1935 * times 1936 */ 1937 if (ret && 1938 hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) 1939 return ret; 1940 1941 ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); 1942 if (ret) 1943 return ret; 1944 1945 hdev->last_reset_time = jiffies; 1946 hdev->rst_stats.rst_done_cnt++; 1947 hdev->rst_stats.rst_fail_cnt = 0; 1948 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1949 1950 return 0; 1951 } 1952 1953 static void hclgevf_reset(struct hclgevf_dev *hdev) 1954 { 1955 if (hclgevf_reset_prepare(hdev)) 1956 goto err_reset; 1957 1958 /* check if VF could successfully fetch the hardware reset completion 1959 * status from the hardware 1960 */ 1961 if (hclgevf_reset_wait(hdev)) { 1962 /* can't do much in this situation, will disable VF */ 1963 dev_err(&hdev->pdev->dev, 1964 "failed to fetch H/W reset completion status\n"); 1965 goto err_reset; 1966 } 1967 1968 if (hclgevf_reset_rebuild(hdev)) 1969 goto err_reset; 1970 1971 return; 1972 1973 err_reset: 1974 hclgevf_reset_err_handle(hdev); 1975 } 1976 1977 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1978 unsigned long *addr) 1979 { 1980 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1981 1982 /* return the highest priority reset level amongst all */ 1983 if (test_bit(HNAE3_VF_RESET, addr)) { 1984 rst_level = HNAE3_VF_RESET; 1985 clear_bit(HNAE3_VF_RESET, addr); 1986 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1987 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1988 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1989 rst_level = HNAE3_VF_FULL_RESET; 1990 clear_bit(HNAE3_VF_FULL_RESET, addr); 1991 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1992 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1993 rst_level = HNAE3_VF_PF_FUNC_RESET; 1994 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1995 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1996 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1997 rst_level = HNAE3_VF_FUNC_RESET; 1998 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1999 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 2000 rst_level = HNAE3_FLR_RESET; 2001 clear_bit(HNAE3_FLR_RESET, addr); 2002 } 2003 2004 return rst_level; 2005 } 2006 2007 static void hclgevf_reset_event(struct pci_dev *pdev, 2008 struct hnae3_handle *handle) 2009 { 2010 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2011 struct hclgevf_dev *hdev = ae_dev->priv; 2012 2013 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 2014 2015 if (hdev->default_reset_request) 2016 hdev->reset_level = 2017 hclgevf_get_reset_level(hdev, 2018 &hdev->default_reset_request); 2019 else 2020 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2021 2022 /* reset of this VF requested */ 2023 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 2024 hclgevf_reset_task_schedule(hdev); 2025 2026 hdev->last_reset_time = jiffies; 2027 } 2028 2029 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 2030 enum hnae3_reset_type rst_type) 2031 { 2032 struct hclgevf_dev *hdev = ae_dev->priv; 2033 2034 set_bit(rst_type, &hdev->default_reset_request); 2035 } 2036 2037 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 2038 { 2039 writel(en ? 1 : 0, vector->addr); 2040 } 2041 2042 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 2043 { 2044 #define HCLGEVF_FLR_RETRY_WAIT_MS 500 2045 #define HCLGEVF_FLR_RETRY_CNT 5 2046 2047 struct hclgevf_dev *hdev = ae_dev->priv; 2048 int retry_cnt = 0; 2049 int ret; 2050 2051 retry: 2052 down(&hdev->reset_sem); 2053 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2054 hdev->reset_type = HNAE3_FLR_RESET; 2055 ret = hclgevf_reset_prepare(hdev); 2056 if (ret) { 2057 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", 2058 ret); 2059 if (hdev->reset_pending || 2060 retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) { 2061 dev_err(&hdev->pdev->dev, 2062 "reset_pending:0x%lx, retry_cnt:%d\n", 2063 hdev->reset_pending, retry_cnt); 2064 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2065 up(&hdev->reset_sem); 2066 msleep(HCLGEVF_FLR_RETRY_WAIT_MS); 2067 goto retry; 2068 } 2069 } 2070 2071 /* disable misc vector before FLR done */ 2072 hclgevf_enable_vector(&hdev->misc_vector, false); 2073 hdev->rst_stats.flr_rst_cnt++; 2074 } 2075 2076 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 2077 { 2078 struct hclgevf_dev *hdev = ae_dev->priv; 2079 int ret; 2080 2081 hclgevf_enable_vector(&hdev->misc_vector, true); 2082 2083 ret = hclgevf_reset_rebuild(hdev); 2084 if (ret) 2085 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 2086 ret); 2087 2088 hdev->reset_type = HNAE3_NONE_RESET; 2089 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2090 up(&hdev->reset_sem); 2091 } 2092 2093 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 2094 { 2095 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2096 2097 return hdev->fw_version; 2098 } 2099 2100 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 2101 { 2102 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 2103 2104 vector->vector_irq = pci_irq_vector(hdev->pdev, 2105 HCLGEVF_MISC_VECTOR_NUM); 2106 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 2107 /* vector status always valid for Vector 0 */ 2108 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 2109 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 2110 2111 hdev->num_msi_left -= 1; 2112 hdev->num_msi_used += 1; 2113 } 2114 2115 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 2116 { 2117 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2118 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 2119 &hdev->state)) 2120 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2121 } 2122 2123 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 2124 { 2125 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2126 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 2127 &hdev->state)) 2128 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2129 } 2130 2131 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 2132 unsigned long delay) 2133 { 2134 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2135 !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2136 mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); 2137 } 2138 2139 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) 2140 { 2141 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 2142 2143 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) 2144 return; 2145 2146 down(&hdev->reset_sem); 2147 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2148 2149 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 2150 &hdev->reset_state)) { 2151 /* PF has initmated that it is about to reset the hardware. 2152 * We now have to poll & check if hardware has actually 2153 * completed the reset sequence. On hardware reset completion, 2154 * VF needs to reset the client and ae device. 2155 */ 2156 hdev->reset_attempts = 0; 2157 2158 hdev->last_reset_time = jiffies; 2159 while ((hdev->reset_type = 2160 hclgevf_get_reset_level(hdev, &hdev->reset_pending)) 2161 != HNAE3_NONE_RESET) 2162 hclgevf_reset(hdev); 2163 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 2164 &hdev->reset_state)) { 2165 /* we could be here when either of below happens: 2166 * 1. reset was initiated due to watchdog timeout caused by 2167 * a. IMP was earlier reset and our TX got choked down and 2168 * which resulted in watchdog reacting and inducing VF 2169 * reset. This also means our cmdq would be unreliable. 2170 * b. problem in TX due to other lower layer(example link 2171 * layer not functioning properly etc.) 2172 * 2. VF reset might have been initiated due to some config 2173 * change. 2174 * 2175 * NOTE: Theres no clear way to detect above cases than to react 2176 * to the response of PF for this reset request. PF will ack the 2177 * 1b and 2. cases but we will not get any intimation about 1a 2178 * from PF as cmdq would be in unreliable state i.e. mailbox 2179 * communication between PF and VF would be broken. 2180 * 2181 * if we are never geting into pending state it means either: 2182 * 1. PF is not receiving our request which could be due to IMP 2183 * reset 2184 * 2. PF is screwed 2185 * We cannot do much for 2. but to check first we can try reset 2186 * our PCIe + stack and see if it alleviates the problem. 2187 */ 2188 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 2189 /* prepare for full reset of stack + pcie interface */ 2190 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 2191 2192 /* "defer" schedule the reset task again */ 2193 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2194 } else { 2195 hdev->reset_attempts++; 2196 2197 set_bit(hdev->reset_level, &hdev->reset_pending); 2198 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2199 } 2200 hclgevf_reset_task_schedule(hdev); 2201 } 2202 2203 hdev->reset_type = HNAE3_NONE_RESET; 2204 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2205 up(&hdev->reset_sem); 2206 } 2207 2208 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 2209 { 2210 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2211 return; 2212 2213 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 2214 return; 2215 2216 hclgevf_mbx_async_handler(hdev); 2217 2218 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2219 } 2220 2221 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 2222 { 2223 struct hclge_vf_to_pf_msg send_msg; 2224 int ret; 2225 2226 if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) 2227 return; 2228 2229 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 2230 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2231 if (ret) 2232 dev_err(&hdev->pdev->dev, 2233 "VF sends keep alive cmd failed(=%d)\n", ret); 2234 } 2235 2236 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 2237 { 2238 unsigned long delta = round_jiffies_relative(HZ); 2239 struct hnae3_handle *handle = &hdev->nic; 2240 2241 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2242 return; 2243 2244 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 2245 delta = jiffies - hdev->last_serv_processed; 2246 2247 if (delta < round_jiffies_relative(HZ)) { 2248 delta = round_jiffies_relative(HZ) - delta; 2249 goto out; 2250 } 2251 } 2252 2253 hdev->serv_processed_cnt++; 2254 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 2255 hclgevf_keep_alive(hdev); 2256 2257 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 2258 hdev->last_serv_processed = jiffies; 2259 goto out; 2260 } 2261 2262 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 2263 hclgevf_tqps_update_stats(handle); 2264 2265 /* request the link status from the PF. PF would be able to tell VF 2266 * about such updates in future so we might remove this later 2267 */ 2268 hclgevf_request_link_info(hdev); 2269 2270 hclgevf_update_link_mode(hdev); 2271 2272 hclgevf_sync_vlan_filter(hdev); 2273 2274 hclgevf_sync_mac_table(hdev); 2275 2276 hclgevf_sync_promisc_mode(hdev); 2277 2278 hdev->last_serv_processed = jiffies; 2279 2280 out: 2281 hclgevf_task_schedule(hdev, delta); 2282 } 2283 2284 static void hclgevf_service_task(struct work_struct *work) 2285 { 2286 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 2287 service_task.work); 2288 2289 hclgevf_reset_service_task(hdev); 2290 hclgevf_mailbox_service_task(hdev); 2291 hclgevf_periodic_service_task(hdev); 2292 2293 /* Handle reset and mbx again in case periodical task delays the 2294 * handling by calling hclgevf_task_schedule() in 2295 * hclgevf_periodic_service_task() 2296 */ 2297 hclgevf_reset_service_task(hdev); 2298 hclgevf_mailbox_service_task(hdev); 2299 } 2300 2301 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 2302 { 2303 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 2304 } 2305 2306 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 2307 u32 *clearval) 2308 { 2309 u32 val, cmdq_stat_reg, rst_ing_reg; 2310 2311 /* fetch the events from their corresponding regs */ 2312 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 2313 HCLGEVF_VECTOR0_CMDQ_STATE_REG); 2314 2315 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 2316 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2317 dev_info(&hdev->pdev->dev, 2318 "receive reset interrupt 0x%x!\n", rst_ing_reg); 2319 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 2320 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2321 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 2322 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 2323 hdev->rst_stats.vf_rst_cnt++; 2324 /* set up VF hardware reset status, its PF will clear 2325 * this status when PF has initialized done. 2326 */ 2327 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 2328 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 2329 val | HCLGEVF_VF_RST_ING_BIT); 2330 return HCLGEVF_VECTOR0_EVENT_RST; 2331 } 2332 2333 /* check for vector0 mailbox(=CMDQ RX) event source */ 2334 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 2335 /* for revision 0x21, clearing interrupt is writing bit 0 2336 * to the clear register, writing bit 1 means to keep the 2337 * old value. 2338 * for revision 0x20, the clear register is a read & write 2339 * register, so we should just write 0 to the bit we are 2340 * handling, and keep other bits as cmdq_stat_reg. 2341 */ 2342 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2343 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2344 else 2345 *clearval = cmdq_stat_reg & 2346 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2347 2348 return HCLGEVF_VECTOR0_EVENT_MBX; 2349 } 2350 2351 /* print other vector0 event source */ 2352 dev_info(&hdev->pdev->dev, 2353 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2354 cmdq_stat_reg); 2355 2356 return HCLGEVF_VECTOR0_EVENT_OTHER; 2357 } 2358 2359 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2360 { 2361 enum hclgevf_evt_cause event_cause; 2362 struct hclgevf_dev *hdev = data; 2363 u32 clearval; 2364 2365 hclgevf_enable_vector(&hdev->misc_vector, false); 2366 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2367 2368 switch (event_cause) { 2369 case HCLGEVF_VECTOR0_EVENT_RST: 2370 hclgevf_reset_task_schedule(hdev); 2371 break; 2372 case HCLGEVF_VECTOR0_EVENT_MBX: 2373 hclgevf_mbx_handler(hdev); 2374 break; 2375 default: 2376 break; 2377 } 2378 2379 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 2380 hclgevf_clear_event_cause(hdev, clearval); 2381 hclgevf_enable_vector(&hdev->misc_vector, true); 2382 } 2383 2384 return IRQ_HANDLED; 2385 } 2386 2387 static int hclgevf_configure(struct hclgevf_dev *hdev) 2388 { 2389 int ret; 2390 2391 /* get current port based vlan state from PF */ 2392 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2393 if (ret) 2394 return ret; 2395 2396 /* get queue configuration from PF */ 2397 ret = hclgevf_get_queue_info(hdev); 2398 if (ret) 2399 return ret; 2400 2401 /* get queue depth info from PF */ 2402 ret = hclgevf_get_queue_depth(hdev); 2403 if (ret) 2404 return ret; 2405 2406 ret = hclgevf_get_pf_media_type(hdev); 2407 if (ret) 2408 return ret; 2409 2410 /* get tc configuration from PF */ 2411 return hclgevf_get_tc_info(hdev); 2412 } 2413 2414 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2415 { 2416 struct pci_dev *pdev = ae_dev->pdev; 2417 struct hclgevf_dev *hdev; 2418 2419 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2420 if (!hdev) 2421 return -ENOMEM; 2422 2423 hdev->pdev = pdev; 2424 hdev->ae_dev = ae_dev; 2425 ae_dev->priv = hdev; 2426 2427 return 0; 2428 } 2429 2430 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2431 { 2432 struct hnae3_handle *roce = &hdev->roce; 2433 struct hnae3_handle *nic = &hdev->nic; 2434 2435 roce->rinfo.num_vectors = hdev->num_roce_msix; 2436 2437 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2438 hdev->num_msi_left == 0) 2439 return -EINVAL; 2440 2441 roce->rinfo.base_vector = hdev->roce_base_vector; 2442 2443 roce->rinfo.netdev = nic->kinfo.netdev; 2444 roce->rinfo.roce_io_base = hdev->hw.io_base; 2445 roce->rinfo.roce_mem_base = hdev->hw.mem_base; 2446 2447 roce->pdev = nic->pdev; 2448 roce->ae_algo = nic->ae_algo; 2449 roce->numa_node_mask = nic->numa_node_mask; 2450 2451 return 0; 2452 } 2453 2454 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 2455 { 2456 struct hclgevf_cfg_gro_status_cmd *req; 2457 struct hclgevf_desc desc; 2458 int ret; 2459 2460 if (!hnae3_dev_gro_supported(hdev)) 2461 return 0; 2462 2463 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2464 false); 2465 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2466 2467 req->gro_en = en ? 1 : 0; 2468 2469 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2470 if (ret) 2471 dev_err(&hdev->pdev->dev, 2472 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2473 2474 return ret; 2475 } 2476 2477 static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) 2478 { 2479 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2480 struct hclgevf_rss_tuple_cfg *tuple_sets; 2481 u32 i; 2482 2483 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 2484 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2485 tuple_sets = &rss_cfg->rss_tuple_sets; 2486 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2487 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2488 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2489 HCLGEVF_RSS_KEY_SIZE); 2490 2491 tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2492 tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2493 tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2494 tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2495 tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2496 tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2497 tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2498 tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2499 } 2500 2501 /* Initialize RSS indirect table */ 2502 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 2503 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2504 } 2505 2506 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2507 { 2508 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2509 int ret; 2510 2511 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2512 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 2513 rss_cfg->rss_hash_key); 2514 if (ret) 2515 return ret; 2516 2517 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2518 if (ret) 2519 return ret; 2520 } 2521 2522 ret = hclgevf_set_rss_indir_table(hdev); 2523 if (ret) 2524 return ret; 2525 2526 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2527 } 2528 2529 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2530 { 2531 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2532 false); 2533 } 2534 2535 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2536 { 2537 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2538 2539 unsigned long last = hdev->serv_processed_cnt; 2540 int i = 0; 2541 2542 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2543 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2544 last == hdev->serv_processed_cnt) 2545 usleep_range(1, 1); 2546 } 2547 2548 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2549 { 2550 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2551 2552 if (enable) { 2553 hclgevf_task_schedule(hdev, 0); 2554 } else { 2555 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2556 2557 /* flush memory to make sure DOWN is seen by service task */ 2558 smp_mb__before_atomic(); 2559 hclgevf_flush_link_update(hdev); 2560 } 2561 } 2562 2563 static int hclgevf_ae_start(struct hnae3_handle *handle) 2564 { 2565 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2566 2567 hclgevf_reset_tqp_stats(handle); 2568 2569 hclgevf_request_link_info(hdev); 2570 2571 hclgevf_update_link_mode(hdev); 2572 2573 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2574 2575 return 0; 2576 } 2577 2578 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2579 { 2580 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2581 int i; 2582 2583 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2584 2585 if (hdev->reset_type != HNAE3_VF_RESET) 2586 for (i = 0; i < handle->kinfo.num_tqps; i++) 2587 if (hclgevf_reset_tqp(handle, i)) 2588 break; 2589 2590 hclgevf_reset_tqp_stats(handle); 2591 hclgevf_update_link_status(hdev, 0); 2592 } 2593 2594 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2595 { 2596 #define HCLGEVF_STATE_ALIVE 1 2597 #define HCLGEVF_STATE_NOT_ALIVE 0 2598 2599 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2600 struct hclge_vf_to_pf_msg send_msg; 2601 2602 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2603 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2604 HCLGEVF_STATE_NOT_ALIVE; 2605 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2606 } 2607 2608 static int hclgevf_client_start(struct hnae3_handle *handle) 2609 { 2610 return hclgevf_set_alive(handle, true); 2611 } 2612 2613 static void hclgevf_client_stop(struct hnae3_handle *handle) 2614 { 2615 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2616 int ret; 2617 2618 ret = hclgevf_set_alive(handle, false); 2619 if (ret) 2620 dev_warn(&hdev->pdev->dev, 2621 "%s failed %d\n", __func__, ret); 2622 } 2623 2624 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2625 { 2626 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2627 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2628 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2629 2630 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2631 2632 mutex_init(&hdev->mbx_resp.mbx_mutex); 2633 sema_init(&hdev->reset_sem, 1); 2634 2635 spin_lock_init(&hdev->mac_table.mac_list_lock); 2636 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2637 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2638 2639 /* bring the device down */ 2640 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2641 } 2642 2643 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2644 { 2645 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2646 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2647 2648 if (hdev->service_task.work.func) 2649 cancel_delayed_work_sync(&hdev->service_task); 2650 2651 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2652 } 2653 2654 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2655 { 2656 struct pci_dev *pdev = hdev->pdev; 2657 int vectors; 2658 int i; 2659 2660 if (hnae3_dev_roce_supported(hdev)) 2661 vectors = pci_alloc_irq_vectors(pdev, 2662 hdev->roce_base_msix_offset + 1, 2663 hdev->num_msi, 2664 PCI_IRQ_MSIX); 2665 else 2666 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2667 hdev->num_msi, 2668 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2669 2670 if (vectors < 0) { 2671 dev_err(&pdev->dev, 2672 "failed(%d) to allocate MSI/MSI-X vectors\n", 2673 vectors); 2674 return vectors; 2675 } 2676 if (vectors < hdev->num_msi) 2677 dev_warn(&hdev->pdev->dev, 2678 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2679 hdev->num_msi, vectors); 2680 2681 hdev->num_msi = vectors; 2682 hdev->num_msi_left = vectors; 2683 2684 hdev->base_msi_vector = pdev->irq; 2685 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2686 2687 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2688 sizeof(u16), GFP_KERNEL); 2689 if (!hdev->vector_status) { 2690 pci_free_irq_vectors(pdev); 2691 return -ENOMEM; 2692 } 2693 2694 for (i = 0; i < hdev->num_msi; i++) 2695 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2696 2697 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2698 sizeof(int), GFP_KERNEL); 2699 if (!hdev->vector_irq) { 2700 devm_kfree(&pdev->dev, hdev->vector_status); 2701 pci_free_irq_vectors(pdev); 2702 return -ENOMEM; 2703 } 2704 2705 return 0; 2706 } 2707 2708 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2709 { 2710 struct pci_dev *pdev = hdev->pdev; 2711 2712 devm_kfree(&pdev->dev, hdev->vector_status); 2713 devm_kfree(&pdev->dev, hdev->vector_irq); 2714 pci_free_irq_vectors(pdev); 2715 } 2716 2717 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2718 { 2719 int ret; 2720 2721 hclgevf_get_misc_vector(hdev); 2722 2723 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2724 HCLGEVF_NAME, pci_name(hdev->pdev)); 2725 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2726 0, hdev->misc_vector.name, hdev); 2727 if (ret) { 2728 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2729 hdev->misc_vector.vector_irq); 2730 return ret; 2731 } 2732 2733 hclgevf_clear_event_cause(hdev, 0); 2734 2735 /* enable misc. vector(vector 0) */ 2736 hclgevf_enable_vector(&hdev->misc_vector, true); 2737 2738 return ret; 2739 } 2740 2741 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2742 { 2743 /* disable misc vector(vector 0) */ 2744 hclgevf_enable_vector(&hdev->misc_vector, false); 2745 synchronize_irq(hdev->misc_vector.vector_irq); 2746 free_irq(hdev->misc_vector.vector_irq, hdev); 2747 hclgevf_free_vector(hdev, 0); 2748 } 2749 2750 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2751 { 2752 struct device *dev = &hdev->pdev->dev; 2753 2754 dev_info(dev, "VF info begin:\n"); 2755 2756 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2757 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2758 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2759 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2760 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2761 dev_info(dev, "PF media type of this VF: %u\n", 2762 hdev->hw.mac.media_type); 2763 2764 dev_info(dev, "VF info end.\n"); 2765 } 2766 2767 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2768 struct hnae3_client *client) 2769 { 2770 struct hclgevf_dev *hdev = ae_dev->priv; 2771 int rst_cnt = hdev->rst_stats.rst_cnt; 2772 int ret; 2773 2774 ret = client->ops->init_instance(&hdev->nic); 2775 if (ret) 2776 return ret; 2777 2778 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2779 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2780 rst_cnt != hdev->rst_stats.rst_cnt) { 2781 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2782 2783 client->ops->uninit_instance(&hdev->nic, 0); 2784 return -EBUSY; 2785 } 2786 2787 hnae3_set_client_init_flag(client, ae_dev, 1); 2788 2789 if (netif_msg_drv(&hdev->nic)) 2790 hclgevf_info_show(hdev); 2791 2792 return 0; 2793 } 2794 2795 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2796 struct hnae3_client *client) 2797 { 2798 struct hclgevf_dev *hdev = ae_dev->priv; 2799 int ret; 2800 2801 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2802 !hdev->nic_client) 2803 return 0; 2804 2805 ret = hclgevf_init_roce_base_info(hdev); 2806 if (ret) 2807 return ret; 2808 2809 ret = client->ops->init_instance(&hdev->roce); 2810 if (ret) 2811 return ret; 2812 2813 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2814 hnae3_set_client_init_flag(client, ae_dev, 1); 2815 2816 return 0; 2817 } 2818 2819 static int hclgevf_init_client_instance(struct hnae3_client *client, 2820 struct hnae3_ae_dev *ae_dev) 2821 { 2822 struct hclgevf_dev *hdev = ae_dev->priv; 2823 int ret; 2824 2825 switch (client->type) { 2826 case HNAE3_CLIENT_KNIC: 2827 hdev->nic_client = client; 2828 hdev->nic.client = client; 2829 2830 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2831 if (ret) 2832 goto clear_nic; 2833 2834 ret = hclgevf_init_roce_client_instance(ae_dev, 2835 hdev->roce_client); 2836 if (ret) 2837 goto clear_roce; 2838 2839 break; 2840 case HNAE3_CLIENT_ROCE: 2841 if (hnae3_dev_roce_supported(hdev)) { 2842 hdev->roce_client = client; 2843 hdev->roce.client = client; 2844 } 2845 2846 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2847 if (ret) 2848 goto clear_roce; 2849 2850 break; 2851 default: 2852 return -EINVAL; 2853 } 2854 2855 return 0; 2856 2857 clear_nic: 2858 hdev->nic_client = NULL; 2859 hdev->nic.client = NULL; 2860 return ret; 2861 clear_roce: 2862 hdev->roce_client = NULL; 2863 hdev->roce.client = NULL; 2864 return ret; 2865 } 2866 2867 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2868 struct hnae3_ae_dev *ae_dev) 2869 { 2870 struct hclgevf_dev *hdev = ae_dev->priv; 2871 2872 /* un-init roce, if it exists */ 2873 if (hdev->roce_client) { 2874 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2875 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2876 hdev->roce_client = NULL; 2877 hdev->roce.client = NULL; 2878 } 2879 2880 /* un-init nic/unic, if this was not called by roce client */ 2881 if (client->ops->uninit_instance && hdev->nic_client && 2882 client->type != HNAE3_CLIENT_ROCE) { 2883 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2884 2885 client->ops->uninit_instance(&hdev->nic, 0); 2886 hdev->nic_client = NULL; 2887 hdev->nic.client = NULL; 2888 } 2889 } 2890 2891 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) 2892 { 2893 #define HCLGEVF_MEM_BAR 4 2894 2895 struct pci_dev *pdev = hdev->pdev; 2896 struct hclgevf_hw *hw = &hdev->hw; 2897 2898 /* for device does not have device memory, return directly */ 2899 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) 2900 return 0; 2901 2902 hw->mem_base = devm_ioremap_wc(&pdev->dev, 2903 pci_resource_start(pdev, 2904 HCLGEVF_MEM_BAR), 2905 pci_resource_len(pdev, HCLGEVF_MEM_BAR)); 2906 if (!hw->mem_base) { 2907 dev_err(&pdev->dev, "failed to map device memory\n"); 2908 return -EFAULT; 2909 } 2910 2911 return 0; 2912 } 2913 2914 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2915 { 2916 struct pci_dev *pdev = hdev->pdev; 2917 struct hclgevf_hw *hw; 2918 int ret; 2919 2920 ret = pci_enable_device(pdev); 2921 if (ret) { 2922 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2923 return ret; 2924 } 2925 2926 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2927 if (ret) { 2928 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2929 goto err_disable_device; 2930 } 2931 2932 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2933 if (ret) { 2934 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2935 goto err_disable_device; 2936 } 2937 2938 pci_set_master(pdev); 2939 hw = &hdev->hw; 2940 hw->hdev = hdev; 2941 hw->io_base = pci_iomap(pdev, 2, 0); 2942 if (!hw->io_base) { 2943 dev_err(&pdev->dev, "can't map configuration register space\n"); 2944 ret = -ENOMEM; 2945 goto err_clr_master; 2946 } 2947 2948 ret = hclgevf_dev_mem_map(hdev); 2949 if (ret) 2950 goto err_unmap_io_base; 2951 2952 return 0; 2953 2954 err_unmap_io_base: 2955 pci_iounmap(pdev, hdev->hw.io_base); 2956 err_clr_master: 2957 pci_clear_master(pdev); 2958 pci_release_regions(pdev); 2959 err_disable_device: 2960 pci_disable_device(pdev); 2961 2962 return ret; 2963 } 2964 2965 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2966 { 2967 struct pci_dev *pdev = hdev->pdev; 2968 2969 if (hdev->hw.mem_base) 2970 devm_iounmap(&pdev->dev, hdev->hw.mem_base); 2971 2972 pci_iounmap(pdev, hdev->hw.io_base); 2973 pci_clear_master(pdev); 2974 pci_release_regions(pdev); 2975 pci_disable_device(pdev); 2976 } 2977 2978 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2979 { 2980 struct hclgevf_query_res_cmd *req; 2981 struct hclgevf_desc desc; 2982 int ret; 2983 2984 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 2985 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2986 if (ret) { 2987 dev_err(&hdev->pdev->dev, 2988 "query vf resource failed, ret = %d.\n", ret); 2989 return ret; 2990 } 2991 2992 req = (struct hclgevf_query_res_cmd *)desc.data; 2993 2994 if (hnae3_dev_roce_supported(hdev)) { 2995 hdev->roce_base_msix_offset = 2996 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 2997 HCLGEVF_MSIX_OFT_ROCEE_M, 2998 HCLGEVF_MSIX_OFT_ROCEE_S); 2999 hdev->num_roce_msix = 3000 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3001 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3002 3003 /* nic's msix numbers is always equals to the roce's. */ 3004 hdev->num_nic_msix = hdev->num_roce_msix; 3005 3006 /* VF should have NIC vectors and Roce vectors, NIC vectors 3007 * are queued before Roce vectors. The offset is fixed to 64. 3008 */ 3009 hdev->num_msi = hdev->num_roce_msix + 3010 hdev->roce_base_msix_offset; 3011 } else { 3012 hdev->num_msi = 3013 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3014 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3015 3016 hdev->num_nic_msix = hdev->num_msi; 3017 } 3018 3019 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 3020 dev_err(&hdev->pdev->dev, 3021 "Just %u msi resources, not enough for vf(min:2).\n", 3022 hdev->num_nic_msix); 3023 return -EINVAL; 3024 } 3025 3026 return 0; 3027 } 3028 3029 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 3030 { 3031 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 3032 3033 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3034 3035 ae_dev->dev_specs.max_non_tso_bd_num = 3036 HCLGEVF_MAX_NON_TSO_BD_NUM; 3037 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3038 ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3039 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3040 } 3041 3042 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 3043 struct hclgevf_desc *desc) 3044 { 3045 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3046 struct hclgevf_dev_specs_0_cmd *req0; 3047 struct hclgevf_dev_specs_1_cmd *req1; 3048 3049 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 3050 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 3051 3052 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 3053 ae_dev->dev_specs.rss_ind_tbl_size = 3054 le16_to_cpu(req0->rss_ind_tbl_size); 3055 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 3056 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 3057 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 3058 } 3059 3060 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 3061 { 3062 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 3063 3064 if (!dev_specs->max_non_tso_bd_num) 3065 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 3066 if (!dev_specs->rss_ind_tbl_size) 3067 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3068 if (!dev_specs->rss_key_size) 3069 dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3070 if (!dev_specs->max_int_gl) 3071 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3072 } 3073 3074 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 3075 { 3076 struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 3077 int ret; 3078 int i; 3079 3080 /* set default specifications as devices lower than version V3 do not 3081 * support querying specifications from firmware. 3082 */ 3083 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 3084 hclgevf_set_default_dev_specs(hdev); 3085 return 0; 3086 } 3087 3088 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3089 hclgevf_cmd_setup_basic_desc(&desc[i], 3090 HCLGEVF_OPC_QUERY_DEV_SPECS, true); 3091 desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT); 3092 } 3093 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, 3094 true); 3095 3096 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 3097 if (ret) 3098 return ret; 3099 3100 hclgevf_parse_dev_specs(hdev, desc); 3101 hclgevf_check_dev_specs(hdev); 3102 3103 return 0; 3104 } 3105 3106 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 3107 { 3108 struct pci_dev *pdev = hdev->pdev; 3109 int ret = 0; 3110 3111 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 3112 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3113 hclgevf_misc_irq_uninit(hdev); 3114 hclgevf_uninit_msi(hdev); 3115 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3116 } 3117 3118 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3119 pci_set_master(pdev); 3120 ret = hclgevf_init_msi(hdev); 3121 if (ret) { 3122 dev_err(&pdev->dev, 3123 "failed(%d) to init MSI/MSI-X\n", ret); 3124 return ret; 3125 } 3126 3127 ret = hclgevf_misc_irq_init(hdev); 3128 if (ret) { 3129 hclgevf_uninit_msi(hdev); 3130 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 3131 ret); 3132 return ret; 3133 } 3134 3135 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3136 } 3137 3138 return ret; 3139 } 3140 3141 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 3142 { 3143 struct hclge_vf_to_pf_msg send_msg; 3144 3145 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 3146 HCLGE_MBX_VPORT_LIST_CLEAR); 3147 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3148 } 3149 3150 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 3151 { 3152 struct pci_dev *pdev = hdev->pdev; 3153 int ret; 3154 3155 ret = hclgevf_pci_reset(hdev); 3156 if (ret) { 3157 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 3158 return ret; 3159 } 3160 3161 ret = hclgevf_cmd_init(hdev); 3162 if (ret) { 3163 dev_err(&pdev->dev, "cmd failed %d\n", ret); 3164 return ret; 3165 } 3166 3167 ret = hclgevf_rss_init_hw(hdev); 3168 if (ret) { 3169 dev_err(&hdev->pdev->dev, 3170 "failed(%d) to initialize RSS\n", ret); 3171 return ret; 3172 } 3173 3174 ret = hclgevf_config_gro(hdev, true); 3175 if (ret) 3176 return ret; 3177 3178 ret = hclgevf_init_vlan_config(hdev); 3179 if (ret) { 3180 dev_err(&hdev->pdev->dev, 3181 "failed(%d) to initialize VLAN config\n", ret); 3182 return ret; 3183 } 3184 3185 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3186 3187 dev_info(&hdev->pdev->dev, "Reset done\n"); 3188 3189 return 0; 3190 } 3191 3192 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 3193 { 3194 struct pci_dev *pdev = hdev->pdev; 3195 int ret; 3196 3197 ret = hclgevf_pci_init(hdev); 3198 if (ret) 3199 return ret; 3200 3201 ret = hclgevf_cmd_queue_init(hdev); 3202 if (ret) 3203 goto err_cmd_queue_init; 3204 3205 ret = hclgevf_cmd_init(hdev); 3206 if (ret) 3207 goto err_cmd_init; 3208 3209 /* Get vf resource */ 3210 ret = hclgevf_query_vf_resource(hdev); 3211 if (ret) 3212 goto err_cmd_init; 3213 3214 ret = hclgevf_query_dev_specs(hdev); 3215 if (ret) { 3216 dev_err(&pdev->dev, 3217 "failed to query dev specifications, ret = %d\n", ret); 3218 goto err_cmd_init; 3219 } 3220 3221 ret = hclgevf_init_msi(hdev); 3222 if (ret) { 3223 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 3224 goto err_cmd_init; 3225 } 3226 3227 hclgevf_state_init(hdev); 3228 hdev->reset_level = HNAE3_VF_FUNC_RESET; 3229 hdev->reset_type = HNAE3_NONE_RESET; 3230 3231 ret = hclgevf_misc_irq_init(hdev); 3232 if (ret) 3233 goto err_misc_irq_init; 3234 3235 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3236 3237 ret = hclgevf_configure(hdev); 3238 if (ret) { 3239 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 3240 goto err_config; 3241 } 3242 3243 ret = hclgevf_alloc_tqps(hdev); 3244 if (ret) { 3245 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 3246 goto err_config; 3247 } 3248 3249 ret = hclgevf_set_handle_info(hdev); 3250 if (ret) 3251 goto err_config; 3252 3253 ret = hclgevf_config_gro(hdev, true); 3254 if (ret) 3255 goto err_config; 3256 3257 /* Initialize RSS for this VF */ 3258 hclgevf_rss_init_cfg(hdev); 3259 ret = hclgevf_rss_init_hw(hdev); 3260 if (ret) { 3261 dev_err(&hdev->pdev->dev, 3262 "failed(%d) to initialize RSS\n", ret); 3263 goto err_config; 3264 } 3265 3266 /* ensure vf tbl list as empty before init*/ 3267 ret = hclgevf_clear_vport_list(hdev); 3268 if (ret) { 3269 dev_err(&pdev->dev, 3270 "failed to clear tbl list configuration, ret = %d.\n", 3271 ret); 3272 goto err_config; 3273 } 3274 3275 ret = hclgevf_init_vlan_config(hdev); 3276 if (ret) { 3277 dev_err(&hdev->pdev->dev, 3278 "failed(%d) to initialize VLAN config\n", ret); 3279 goto err_config; 3280 } 3281 3282 hdev->last_reset_time = jiffies; 3283 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 3284 HCLGEVF_DRIVER_NAME); 3285 3286 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 3287 3288 return 0; 3289 3290 err_config: 3291 hclgevf_misc_irq_uninit(hdev); 3292 err_misc_irq_init: 3293 hclgevf_state_uninit(hdev); 3294 hclgevf_uninit_msi(hdev); 3295 err_cmd_init: 3296 hclgevf_cmd_uninit(hdev); 3297 err_cmd_queue_init: 3298 hclgevf_pci_uninit(hdev); 3299 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3300 return ret; 3301 } 3302 3303 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 3304 { 3305 struct hclge_vf_to_pf_msg send_msg; 3306 3307 hclgevf_state_uninit(hdev); 3308 3309 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3310 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3311 3312 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3313 hclgevf_misc_irq_uninit(hdev); 3314 hclgevf_uninit_msi(hdev); 3315 } 3316 3317 hclgevf_cmd_uninit(hdev); 3318 hclgevf_pci_uninit(hdev); 3319 hclgevf_uninit_mac_list(hdev); 3320 } 3321 3322 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 3323 { 3324 struct pci_dev *pdev = ae_dev->pdev; 3325 int ret; 3326 3327 ret = hclgevf_alloc_hdev(ae_dev); 3328 if (ret) { 3329 dev_err(&pdev->dev, "hclge device allocation failed\n"); 3330 return ret; 3331 } 3332 3333 ret = hclgevf_init_hdev(ae_dev->priv); 3334 if (ret) { 3335 dev_err(&pdev->dev, "hclge device initialization failed\n"); 3336 return ret; 3337 } 3338 3339 return 0; 3340 } 3341 3342 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3343 { 3344 struct hclgevf_dev *hdev = ae_dev->priv; 3345 3346 hclgevf_uninit_hdev(hdev); 3347 ae_dev->priv = NULL; 3348 } 3349 3350 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3351 { 3352 struct hnae3_handle *nic = &hdev->nic; 3353 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3354 3355 return min_t(u32, hdev->rss_size_max, 3356 hdev->num_tqps / kinfo->num_tc); 3357 } 3358 3359 /** 3360 * hclgevf_get_channels - Get the current channels enabled and max supported. 3361 * @handle: hardware information for network interface 3362 * @ch: ethtool channels structure 3363 * 3364 * We don't support separate tx and rx queues as channels. The other count 3365 * represents how many queues are being used for control. max_combined counts 3366 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3367 * q_vectors since we support a lot more queue pairs than q_vectors. 3368 **/ 3369 static void hclgevf_get_channels(struct hnae3_handle *handle, 3370 struct ethtool_channels *ch) 3371 { 3372 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3373 3374 ch->max_combined = hclgevf_get_max_channels(hdev); 3375 ch->other_count = 0; 3376 ch->max_other = 0; 3377 ch->combined_count = handle->kinfo.rss_size; 3378 } 3379 3380 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3381 u16 *alloc_tqps, u16 *max_rss_size) 3382 { 3383 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3384 3385 *alloc_tqps = hdev->num_tqps; 3386 *max_rss_size = hdev->rss_size_max; 3387 } 3388 3389 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3390 u32 new_tqps_num) 3391 { 3392 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3393 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3394 u16 max_rss_size; 3395 3396 kinfo->req_rss_size = new_tqps_num; 3397 3398 max_rss_size = min_t(u16, hdev->rss_size_max, 3399 hdev->num_tqps / kinfo->num_tc); 3400 3401 /* Use the user's configuration when it is not larger than 3402 * max_rss_size, otherwise, use the maximum specification value. 3403 */ 3404 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 3405 kinfo->req_rss_size <= max_rss_size) 3406 kinfo->rss_size = kinfo->req_rss_size; 3407 else if (kinfo->rss_size > max_rss_size || 3408 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3409 kinfo->rss_size = max_rss_size; 3410 3411 kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; 3412 } 3413 3414 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 3415 bool rxfh_configured) 3416 { 3417 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3418 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3419 u16 cur_rss_size = kinfo->rss_size; 3420 u16 cur_tqps = kinfo->num_tqps; 3421 u32 *rss_indir; 3422 unsigned int i; 3423 int ret; 3424 3425 hclgevf_update_rss_size(handle, new_tqps_num); 3426 3427 ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 3428 if (ret) 3429 return ret; 3430 3431 /* RSS indirection table has been configuared by user */ 3432 if (rxfh_configured) 3433 goto out; 3434 3435 /* Reinitializes the rss indirect table according to the new RSS size */ 3436 rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 3437 if (!rss_indir) 3438 return -ENOMEM; 3439 3440 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 3441 rss_indir[i] = i % kinfo->rss_size; 3442 3443 hdev->rss_cfg.rss_size = kinfo->rss_size; 3444 3445 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 3446 if (ret) 3447 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 3448 ret); 3449 3450 kfree(rss_indir); 3451 3452 out: 3453 if (!ret) 3454 dev_info(&hdev->pdev->dev, 3455 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 3456 cur_rss_size, kinfo->rss_size, 3457 cur_tqps, kinfo->rss_size * kinfo->num_tc); 3458 3459 return ret; 3460 } 3461 3462 static int hclgevf_get_status(struct hnae3_handle *handle) 3463 { 3464 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3465 3466 return hdev->hw.mac.link; 3467 } 3468 3469 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3470 u8 *auto_neg, u32 *speed, 3471 u8 *duplex) 3472 { 3473 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3474 3475 if (speed) 3476 *speed = hdev->hw.mac.speed; 3477 if (duplex) 3478 *duplex = hdev->hw.mac.duplex; 3479 if (auto_neg) 3480 *auto_neg = AUTONEG_DISABLE; 3481 } 3482 3483 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3484 u8 duplex) 3485 { 3486 hdev->hw.mac.speed = speed; 3487 hdev->hw.mac.duplex = duplex; 3488 } 3489 3490 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3491 { 3492 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3493 3494 return hclgevf_config_gro(hdev, enable); 3495 } 3496 3497 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3498 u8 *module_type) 3499 { 3500 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3501 3502 if (media_type) 3503 *media_type = hdev->hw.mac.media_type; 3504 3505 if (module_type) 3506 *module_type = hdev->hw.mac.module_type; 3507 } 3508 3509 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3510 { 3511 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3512 3513 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3514 } 3515 3516 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3517 { 3518 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3519 3520 return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 3521 } 3522 3523 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3524 { 3525 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3526 3527 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3528 } 3529 3530 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3531 { 3532 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3533 3534 return hdev->rst_stats.hw_rst_done_cnt; 3535 } 3536 3537 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3538 unsigned long *supported, 3539 unsigned long *advertising) 3540 { 3541 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3542 3543 *supported = hdev->hw.mac.supported; 3544 *advertising = hdev->hw.mac.advertising; 3545 } 3546 3547 #define MAX_SEPARATE_NUM 4 3548 #define SEPARATOR_VALUE 0xFFFFFFFF 3549 #define REG_NUM_PER_LINE 4 3550 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 3551 3552 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 3553 { 3554 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 3555 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3556 3557 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 3558 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 3559 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 3560 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 3561 3562 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 3563 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 3564 } 3565 3566 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 3567 void *data) 3568 { 3569 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3570 int i, j, reg_um, separator_num; 3571 u32 *reg = data; 3572 3573 *version = hdev->fw_version; 3574 3575 /* fetching per-VF registers values from VF PCIe register space */ 3576 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 3577 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3578 for (i = 0; i < reg_um; i++) 3579 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 3580 for (i = 0; i < separator_num; i++) 3581 *reg++ = SEPARATOR_VALUE; 3582 3583 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 3584 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3585 for (i = 0; i < reg_um; i++) 3586 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 3587 for (i = 0; i < separator_num; i++) 3588 *reg++ = SEPARATOR_VALUE; 3589 3590 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 3591 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3592 for (j = 0; j < hdev->num_tqps; j++) { 3593 for (i = 0; i < reg_um; i++) 3594 *reg++ = hclgevf_read_dev(&hdev->hw, 3595 ring_reg_addr_list[i] + 3596 0x200 * j); 3597 for (i = 0; i < separator_num; i++) 3598 *reg++ = SEPARATOR_VALUE; 3599 } 3600 3601 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 3602 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3603 for (j = 0; j < hdev->num_msi_used - 1; j++) { 3604 for (i = 0; i < reg_um; i++) 3605 *reg++ = hclgevf_read_dev(&hdev->hw, 3606 tqp_intr_reg_addr_list[i] + 3607 4 * j); 3608 for (i = 0; i < separator_num; i++) 3609 *reg++ = SEPARATOR_VALUE; 3610 } 3611 } 3612 3613 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 3614 u8 *port_base_vlan_info, u8 data_size) 3615 { 3616 struct hnae3_handle *nic = &hdev->nic; 3617 struct hclge_vf_to_pf_msg send_msg; 3618 int ret; 3619 3620 rtnl_lock(); 3621 3622 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 3623 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { 3624 dev_warn(&hdev->pdev->dev, 3625 "is resetting when updating port based vlan info\n"); 3626 rtnl_unlock(); 3627 return; 3628 } 3629 3630 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3631 if (ret) { 3632 rtnl_unlock(); 3633 return; 3634 } 3635 3636 /* send msg to PF and wait update port based vlan info */ 3637 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3638 HCLGE_MBX_PORT_BASE_VLAN_CFG); 3639 memcpy(send_msg.data, port_base_vlan_info, data_size); 3640 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3641 if (!ret) { 3642 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3643 nic->port_base_vlan_state = state; 3644 else 3645 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3646 } 3647 3648 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 3649 rtnl_unlock(); 3650 } 3651 3652 static const struct hnae3_ae_ops hclgevf_ops = { 3653 .init_ae_dev = hclgevf_init_ae_dev, 3654 .uninit_ae_dev = hclgevf_uninit_ae_dev, 3655 .flr_prepare = hclgevf_flr_prepare, 3656 .flr_done = hclgevf_flr_done, 3657 .init_client_instance = hclgevf_init_client_instance, 3658 .uninit_client_instance = hclgevf_uninit_client_instance, 3659 .start = hclgevf_ae_start, 3660 .stop = hclgevf_ae_stop, 3661 .client_start = hclgevf_client_start, 3662 .client_stop = hclgevf_client_stop, 3663 .map_ring_to_vector = hclgevf_map_ring_to_vector, 3664 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3665 .get_vector = hclgevf_get_vector, 3666 .put_vector = hclgevf_put_vector, 3667 .reset_queue = hclgevf_reset_tqp, 3668 .get_mac_addr = hclgevf_get_mac_addr, 3669 .set_mac_addr = hclgevf_set_mac_addr, 3670 .add_uc_addr = hclgevf_add_uc_addr, 3671 .rm_uc_addr = hclgevf_rm_uc_addr, 3672 .add_mc_addr = hclgevf_add_mc_addr, 3673 .rm_mc_addr = hclgevf_rm_mc_addr, 3674 .get_stats = hclgevf_get_stats, 3675 .update_stats = hclgevf_update_stats, 3676 .get_strings = hclgevf_get_strings, 3677 .get_sset_count = hclgevf_get_sset_count, 3678 .get_rss_key_size = hclgevf_get_rss_key_size, 3679 .get_rss_indir_size = hclgevf_get_rss_indir_size, 3680 .get_rss = hclgevf_get_rss, 3681 .set_rss = hclgevf_set_rss, 3682 .get_rss_tuple = hclgevf_get_rss_tuple, 3683 .set_rss_tuple = hclgevf_set_rss_tuple, 3684 .get_tc_size = hclgevf_get_tc_size, 3685 .get_fw_version = hclgevf_get_fw_version, 3686 .set_vlan_filter = hclgevf_set_vlan_filter, 3687 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3688 .reset_event = hclgevf_reset_event, 3689 .set_default_reset_request = hclgevf_set_def_reset_request, 3690 .set_channels = hclgevf_set_channels, 3691 .get_channels = hclgevf_get_channels, 3692 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3693 .get_regs_len = hclgevf_get_regs_len, 3694 .get_regs = hclgevf_get_regs, 3695 .get_status = hclgevf_get_status, 3696 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3697 .get_media_type = hclgevf_get_media_type, 3698 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 3699 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3700 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 3701 .set_gro_en = hclgevf_gro_en, 3702 .set_mtu = hclgevf_set_mtu, 3703 .get_global_queue_id = hclgevf_get_qid_global, 3704 .set_timer_task = hclgevf_set_timer_task, 3705 .get_link_mode = hclgevf_get_link_mode, 3706 .set_promisc_mode = hclgevf_set_promisc_mode, 3707 .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3708 .get_cmdq_stat = hclgevf_get_cmdq_stat, 3709 }; 3710 3711 static struct hnae3_ae_algo ae_algovf = { 3712 .ops = &hclgevf_ops, 3713 .pdev_id_table = ae_algovf_pci_tbl, 3714 }; 3715 3716 static int hclgevf_init(void) 3717 { 3718 pr_info("%s is initializing\n", HCLGEVF_NAME); 3719 3720 hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); 3721 if (!hclgevf_wq) { 3722 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3723 return -ENOMEM; 3724 } 3725 3726 hnae3_register_ae_algo(&ae_algovf); 3727 3728 return 0; 3729 } 3730 3731 static void hclgevf_exit(void) 3732 { 3733 hnae3_unregister_ae_algo(&ae_algovf); 3734 destroy_workqueue(hclgevf_wq); 3735 } 3736 module_init(hclgevf_init); 3737 module_exit(hclgevf_exit); 3738 3739 MODULE_LICENSE("GPL"); 3740 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3741 MODULE_DESCRIPTION("HCLGEVF Driver"); 3742 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3743