// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}
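/* For illustration: container_of() in hclgevf_ae_get_hdev() above resolves
 * the wrapping device as
 *
 *	(struct hclgevf_dev *)((char *)handle -
 *			       offsetof(struct hclgevf_dev, nic));
 *
 * which is why the same helper recovers the hdev from either the nic or the
 * roce handle embedded in struct hclgevf_dev.
 */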
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}
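/* Typical request/response round trip built on the helper above (a sketch
 * mirroring the callers below, not additional driver logic):
 *
 *	struct hclge_vf_to_pf_msg msg;
 *	u8 resp;
 *	int ret;
 *
 *	hclgevf_build_send_msg(&msg, HCLGE_MBX_GET_TCINFO, 0);
 *	ret = hclgevf_send_mbx_msg(hdev, &msg, true, &resp, sizeof(resp));
 */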
static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}
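/* Like GET_QINFO above, the GET_QDEPTH response below is a packed byte
 * array copied out with memcpy() at fixed offsets rather than via a struct:
 *
 *	byte 0..1: number of TX descriptors (u16)
 *	byte 2..3: number of RX descriptors (u16)
 */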
static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
				 i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after init the max rss_size and tqps, adjust the default tqp numbers
	 * and rss size with the actual vector numbers
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
				kinfo->rss_size);

	return 0;
}
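/* Worked example for hclgevf_knic_setup() above, with hypothetical numbers:
 * num_tqps = 16, rss_size_max = 16, two TCs set in hw_tc_map and
 * num_nic_msix = 9 give rss_size = min(16, 16 / 2) = 8 and
 * num_tqps = min(8 * 2, 16) = 16; the vector cap then trims num_tqps to
 * num_nic_msix - 1 = 8 and rss_size to min(8 / 2, 8) = 4.
 */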
static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
						  HCLGEVF_VECTOR_REG_BASE +
						  (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
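/* The RSS key below is pushed in HCLGEVF_RSS_HASH_KEY_NUM byte chunks, one
 * command descriptor per chunk. Assuming a 16-byte chunk size (as on the PF
 * side; an assumption, not restated here), the 40-byte key takes three
 * descriptors: 16 + 16 + 8 bytes.
 */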
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclgevf_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	unsigned int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}
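/* In hclgevf_set_rss_tc_mode() above, tc_size stores log2 of the per-TC
 * queue span; e.g. rss_size = 24 rounds up to 32 and is recorded as
 * ilog2(32) = 5.
 */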
/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with the user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}
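/* Example: "ethtool -N <dev> rx-flow-hash tcp4 sdfn" sets RXH_IP_SRC |
 * RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, which the helper below folds
 * into HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT | HCLGEVF_S_PORT_BIT |
 * HCLGEVF_D_PORT_BIT.
 */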
static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
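/* Ring chains longer than HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM are split
 * across several mailbox messages below; e.g. assuming a limit of 4 params
 * per message (an assumption, the constant lives in hclge_mbx.h), a 7-node
 * chain goes out as two messages carrying 4 + 3 entries.
 */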
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
			hnae3_get_field(node->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret = %d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}
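/* The promisc request below carries three independent enables (en_uc,
 * en_mc, en_bc); hclgevf_set_promisc_mode() derives en_bc from the PCI
 * revision, requesting broadcast promisc only on revisions other than
 * 0x20.
 */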
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct pci_dev *pdev = hdev->pdev;
	bool en_bc_pmc;

	en_bc_pmc = pdev->revision != 0x20;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
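/* SET_UNICAST payload layout used below: the new MAC occupies data[0..5]
 * and the previous MAC data[6..11], letting the PF identify the entry
 * being replaced on a MODIFY.
 */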
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
		HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
			       HCLGE_MBX_MAC_VLAN_UC_ADD);
	ether_addr_copy(send_msg.data, addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
			       HCLGE_MBX_MAC_VLAN_UC_REMOVE);
	ether_addr_copy(send_msg.data, addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
			       HCLGE_MBX_MAC_VLAN_MC_ADD);
	ether_addr_copy(send_msg.data, addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
			       HCLGE_MBX_MAC_VLAN_MC_REMOVE);
	ether_addr_copy(send_msg.data, addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
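/* VLAN filter mailbox payload, per the offsets defined below: data[0] is
 * the is_kill flag, data[1..2] the VLAN id (u16) and data[3..4] the
 * protocol (__be16), both copied in with memcpy().
 */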
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting, firmware is unable to handle the
	 * mailbox. Just record the vlan id, and remove it after the reset
	 * finishes.
	 */
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing a hw vlan filter fails, record the vlan id, and try
	 * to remove it from hw later, to stay consistent with the stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}
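/* Polling budget used below: one read every HCLGEVF_RESET_WAIT_US (20 ms)
 * for up to HCLGEVF_RESET_WAIT_CNT (2000) iterations, i.e. a 40 second
 * timeout (20000 us * 2000 = 40,000,000 us).
 */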
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the stack reset complete. This might
	 * be needed when the reset assertion was made by the PF. Yes, this
	 * also means we might end up waiting a bit more even for a VF reset.
	 */
	msleep(5000);

	return 0;
}

static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	struct hclge_vf_to_pf_msg send_msg;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}
static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fail */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->rst_stats.rst_cnt++;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}
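/* Reset levels are drained in priority order below: VF reset first, then
 * VF full reset, VF/PF function reset, VF function reset and finally FLR;
 * picking one level also clears the lower-priority bits it supersedes.
 */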
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_RETRY_WAIT_MS	500
#define HCLGEVF_FLR_RETRY_CNT		5

	struct hclgevf_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

retry:
	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	hdev->reset_type = HNAE3_FLR_RESET;
	ret = hclgevf_reset_prepare(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
			ret);
		if (hdev->reset_pending ||
		    retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
			dev_err(&hdev->pdev->dev,
				"reset_pending:0x%lx, retry_cnt:%d\n",
				hdev->reset_pending, retry_cnt);
			clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
			up(&hdev->reset_sem);
			msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
			goto retry;
		}
	}

	/* disable misc vector before FLR done */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	hdev->rst_stats.flr_rst_cnt++;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	hclgevf_enable_vector(&hdev->misc_vector, true);

	ret = hclgevf_reset_rebuild(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
			 ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}
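/* FLR preparation above retries up to HCLGEVF_FLR_RETRY_CNT (5) times with
 * a HCLGEVF_FLR_RETRY_WAIT_MS (500 ms) sleep between attempts, so a stuck
 * prepare phase is bounded at roughly 2.5 s of retry waiting.
 */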
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}
static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clearer way to detect the above cases than
		 * to react to the response of PF for this reset request. PF
		 * will ack cases 1b and 2, but we will not get any intimation
		 * about 1a from PF as cmdq would be in unreliable state i.e.
		 * mailbox communication between PF and VF would be broken.
		 *
		 * if we never get into the pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for case 2, but as a first check we can
		 * try resetting our PCIe + stack and see if it alleviates the
		 * problem.
		 */
		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		return;

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
		return;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}
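/* The periodic task below self-paces to roughly once per second: if the
 * previous pass finished less than HZ jiffies ago (say, 300 ms), it
 * re-arms itself for the remainder (~700 ms) instead of running early.
 */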
static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);
	struct hnae3_handle *handle = &hdev->nic;

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
		hclgevf_keep_alive(hdev);

	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
		hclgevf_tqps_update_stats(handle);

	/* Request the link status from the PF. The PF should be able to
	 * push such updates to the VF in the future, so this polling may be
	 * removed later.
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclgevf_task_schedule(hdev, delta);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
						service_task.work);

	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
	hclgevf_periodic_service_task(hdev);

	/* Handle reset and mbx again in case the periodic task delayed
	 * their handling by calling hclgevf_task_schedule() in
	 * hclgevf_periodic_service_task().
	 */
	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 val, cmdq_stat_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
					 HCLGEVF_VECTOR0_CMDQ_STAT_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
		hdev->rst_stats.vf_rst_cnt++;
		/* Set the VF hardware reset status; the PF will clear this
		 * status once it has finished initializing.
		 */
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
				  val | HCLGEVF_VF_RST_ING_BIT);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox (=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		/* For revision 0x21, an interrupt is cleared by writing 0 to
		 * its bit in the clear register; writing 1 keeps the old
		 * value.
		 * For revision 0x20, the clear register is a read & write
		 * register, so we should write 0 only to the bit we are
		 * handling and keep the other bits as read from
		 * cmdq_stat_reg.
		 */
		if (hdev->pdev->revision >= 0x21)
			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		else
			*clearval = cmdq_stat_reg &
				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);

		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
		 cmdq_stat_reg);

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}
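/* Vector 0 interrupt handler. The vector is masked while the event cause
 * is examined and is only cleared and re-enabled for recognized sources;
 * an unknown source therefore leaves vector 0 masked instead of
 * interrupt-storming.
 */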
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	ret = hclgevf_get_pf_media_type(hdev);
	if (ret)
		return ret;

	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}
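/* Default RSS setup. On revision 0x21 and later the hash algorithm, hash
 * key and per-protocol input tuple sets are programmable from the VF; on
 * older revisions only the indirection table and TC mode are initialized
 * here.
 */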
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;
	u32 i;

	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
{
#define HCLGEVF_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		hclgevf_task_schedule(hdev, 0);
	} else {
		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclgevf_flush_link_update(hdev);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->reset_type != HNAE3_VF_RESET)
		for (i = 0; i < handle->kinfo.num_tqps; i++)
			if (hclgevf_reset_tqp(handle, i))
				break;

	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}
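/* The alive state is reported to the PF over the mailbox without waiting
 * for a response, so a failure is only visible through the return code of
 * the send itself.
 */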
static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
#define HCLGEVF_STATE_ALIVE	1
#define HCLGEVF_STATE_NOT_ALIVE	0

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
				   HCLGEVF_STATE_NOT_ALIVE;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	int ret;

	ret = hclgevf_set_alive(handle, true);
	if (ret)
		return ret;

	return 0;
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);
	sema_init(&hdev->reset_sem, 1);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_dev_roce_supported(hdev))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
						hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}
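/* Vector 0 is reserved for misc events (mailbox and reset interrupts).
 * Stale event causes are cleared before the vector is unmasked so that
 * events from before the IRQ was requested are not acted upon.
 */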
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGEVF_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc vector (vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector (vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %u\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}

static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					    struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	ret = client->ops->init_instance(&hdev->nic);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->nic))
		hclgevf_info_show(hdev);

	return 0;
}

static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = hclgevf_init_nic_client_instance(ae_dev, client);
		if (ret)
			goto clear_nic;

		ret = hclgevf_init_roce_client_instance(ae_dev,
							hdev->roce_client);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		ret = hclgevf_init_roce_client_instance(ae_dev, client);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}
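/* Client teardown runs in reverse order of init: any registered RoCE
 * client is uninitialized before the NIC client is torn down.
 */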
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by the roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* the number of NIC MSI-X vectors always equals the RoCE's */
		hdev->num_nic_msix = hdev->num_roce_msix;

		/* The VF has both NIC and RoCE vectors; NIC vectors are laid
		 * out before RoCE vectors, with the offset fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		hdev->num_nic_msix = hdev->num_msi;
	}

	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u MSI resources available, not enough for VF (min: 2).\n",
			hdev->num_nic_msix);
		return -EINVAL;
	}

	return 0;
}
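/* For a full VF reset, the misc IRQ and MSI-X setup are torn down and
 * rebuilt from scratch; for lighter resets with the vectors still
 * initialized, this is effectively a no-op.
 */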
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}
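/* Full one-time device bring-up. The error labels below unwind strictly
 * in reverse order of the corresponding init steps.
 */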
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret)
		return ret;

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret)
		goto err_cmd_queue_init;

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* get VF resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;
	hdev->reset_type = HNAE3_NONE_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret)
		goto err_misc_irq_init;

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret)
		goto err_config;

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
		 HCLGEVF_DRIVER_NAME);

	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_state_uninit(hdev);

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}
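/* Example: with rss_size_max = 16, num_tqps = 8 and num_tc = 1,
 * hclgevf_get_max_channels() returns min(16, 8 / 1) = 8 queue pairs.
 */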
/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 */
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}
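/* Choose the new rss_size below: a non-zero user request that does not
 * exceed max_rss_size is honoured; otherwise rss_size is pulled back to
 * max_rss_size when out of range (or grown to it when no explicit request
 * was made).
 */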
static void hclgevf_update_rss_size(struct hnae3_handle *handle,
				    u32 new_tqps_num)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u16 max_rss_size;

	kinfo->req_rss_size = new_tqps_num;

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     hdev->num_tqps / kinfo->num_tc);

	/* Use the user's configuration when it is not larger than
	 * max_rss_size; otherwise, use the maximum specification value.
	 */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size)
		kinfo->rss_size = kinfo->req_rss_size;
	else if (kinfo->rss_size > max_rss_size ||
		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
		kinfo->rss_size = max_rss_size;

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
}

static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
				bool rxfh_configured)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	hclgevf_update_rss_size(handle, new_tqps_num);

	ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by the user */
	if (rxfh_configured)
		goto out;

	/* reinitialize the rss indirection table to match the new RSS size */
	rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				   u8 *module_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}
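/* Worked example for the length math above: cmdq_reg_addr_list holds 14
 * u32 registers (56 bytes), so it takes 56 / 16 + 1 = 4 dump lines; the
 * "+ 1" covers the partially filled last line, which hclgevf_get_regs()
 * pads with SEPARATOR_VALUE entries. The ring and TQP-interrupt blocks
 * repeat per TQP and per TQP vector respectively.
 */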
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_um, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetch the per-VF register values from the VF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();

	/* send a msg to the PF for it to update the port-based VLAN info */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan_info, data_size);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}
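/* hclgevf_ops is the glue between the hnae3 framework and this VF driver:
 * every framework callback the VF supports is routed through this table.
 */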
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.set_channels = hclgevf_set_channels,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
	.set_promisc_mode = hclgevf_set_promisc_mode,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hclgevf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGEVF_NAME);
	if (!hclgevf_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
	destroy_workqueue(hclgevf_wq);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);