1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 #define HCLGEVF_RESET_MAX_FAIL_CNT 5 15 16 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 17 static struct hnae3_ae_algo ae_algovf; 18 19 static struct workqueue_struct *hclgevf_wq; 20 21 static const struct pci_device_id ae_algovf_pci_tbl[] = { 22 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 23 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 24 /* required last entry */ 25 {0, } 26 }; 27 28 static const u8 hclgevf_hash_key[] = { 29 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 30 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 31 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 32 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 33 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 34 }; 35 36 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 37 38 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 39 HCLGEVF_CMDQ_TX_ADDR_H_REG, 40 HCLGEVF_CMDQ_TX_DEPTH_REG, 41 HCLGEVF_CMDQ_TX_TAIL_REG, 42 HCLGEVF_CMDQ_TX_HEAD_REG, 43 HCLGEVF_CMDQ_RX_ADDR_L_REG, 44 HCLGEVF_CMDQ_RX_ADDR_H_REG, 45 HCLGEVF_CMDQ_RX_DEPTH_REG, 46 HCLGEVF_CMDQ_RX_TAIL_REG, 47 HCLGEVF_CMDQ_RX_HEAD_REG, 48 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 49 HCLGEVF_CMDQ_INTR_STS_REG, 50 HCLGEVF_CMDQ_INTR_EN_REG, 51 HCLGEVF_CMDQ_INTR_GEN_REG}; 52 53 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 54 HCLGEVF_RST_ING, 55 HCLGEVF_GRO_EN_REG}; 56 57 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 58 HCLGEVF_RING_RX_ADDR_H_REG, 59 HCLGEVF_RING_RX_BD_NUM_REG, 60 HCLGEVF_RING_RX_BD_LENGTH_REG, 61 HCLGEVF_RING_RX_MERGE_EN_REG, 62 HCLGEVF_RING_RX_TAIL_REG, 63 HCLGEVF_RING_RX_HEAD_REG, 64 HCLGEVF_RING_RX_FBD_NUM_REG, 65 HCLGEVF_RING_RX_OFFSET_REG, 66 HCLGEVF_RING_RX_FBD_OFFSET_REG, 67 HCLGEVF_RING_RX_STASH_REG, 68 HCLGEVF_RING_RX_BD_ERR_REG, 69 HCLGEVF_RING_TX_ADDR_L_REG, 70 HCLGEVF_RING_TX_ADDR_H_REG, 71 HCLGEVF_RING_TX_BD_NUM_REG, 72 HCLGEVF_RING_TX_PRIORITY_REG, 73 HCLGEVF_RING_TX_TC_REG, 74 HCLGEVF_RING_TX_MERGE_EN_REG, 75 HCLGEVF_RING_TX_TAIL_REG, 76 HCLGEVF_RING_TX_HEAD_REG, 77 HCLGEVF_RING_TX_FBD_NUM_REG, 78 HCLGEVF_RING_TX_OFFSET_REG, 79 HCLGEVF_RING_TX_EBD_NUM_REG, 80 HCLGEVF_RING_TX_EBD_OFFSET_REG, 81 HCLGEVF_RING_TX_BD_ERR_REG, 82 HCLGEVF_RING_EN_REG}; 83 84 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 85 HCLGEVF_TQP_INTR_GL0_REG, 86 HCLGEVF_TQP_INTR_GL1_REG, 87 HCLGEVF_TQP_INTR_GL2_REG, 88 HCLGEVF_TQP_INTR_RL_REG}; 89 90 static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 91 { 92 if (!handle->client) 93 return container_of(handle, struct hclgevf_dev, nic); 94 else if (handle->client->type == HNAE3_CLIENT_ROCE) 95 return container_of(handle, struct hclgevf_dev, roce); 96 else 97 return container_of(handle, struct hclgevf_dev, nic); 98 } 99 100 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 101 { 102 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 103 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 104 struct hclgevf_desc desc; 105 struct hclgevf_tqp *tqp; 106 int status; 107 int i; 108 109 for (i = 0; i < kinfo->num_tqps; i++) { 110 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 111 hclgevf_cmd_setup_basic_desc(&desc, 112 HCLGEVF_OPC_QUERY_RX_STATUS, 113 
true); 114 115 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 116 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 117 if (status) { 118 dev_err(&hdev->pdev->dev, 119 "Query tqp stat fail, status = %d,queue = %d\n", 120 status, i); 121 return status; 122 } 123 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 124 le32_to_cpu(desc.data[1]); 125 126 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 127 true); 128 129 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 130 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 131 if (status) { 132 dev_err(&hdev->pdev->dev, 133 "Query tqp stat fail, status = %d,queue = %d\n", 134 status, i); 135 return status; 136 } 137 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 138 le32_to_cpu(desc.data[1]); 139 } 140 141 return 0; 142 } 143 144 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 145 { 146 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 147 struct hclgevf_tqp *tqp; 148 u64 *buff = data; 149 int i; 150 151 for (i = 0; i < kinfo->num_tqps; i++) { 152 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 153 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 154 } 155 for (i = 0; i < kinfo->num_tqps; i++) { 156 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 157 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 158 } 159 160 return buff; 161 } 162 163 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 164 { 165 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 166 167 return kinfo->num_tqps * 2; 168 } 169 170 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 171 { 172 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 173 u8 *buff = data; 174 int i = 0; 175 176 for (i = 0; i < kinfo->num_tqps; i++) { 177 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 178 struct hclgevf_tqp, q); 179 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 180 tqp->index); 181 buff += ETH_GSTRING_LEN; 182 } 183 184 for (i = 0; i < kinfo->num_tqps; i++) { 185 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 186 struct hclgevf_tqp, q); 187 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 188 tqp->index); 189 buff += ETH_GSTRING_LEN; 190 } 191 192 return buff; 193 } 194 195 static void hclgevf_update_stats(struct hnae3_handle *handle, 196 struct net_device_stats *net_stats) 197 { 198 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 199 int status; 200 201 status = hclgevf_tqps_update_stats(handle); 202 if (status) 203 dev_err(&hdev->pdev->dev, 204 "VF update of TQPS stats fail, status = %d.\n", 205 status); 206 } 207 208 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 209 { 210 if (strset == ETH_SS_TEST) 211 return -EOPNOTSUPP; 212 else if (strset == ETH_SS_STATS) 213 return hclgevf_tqps_get_sset_count(handle, strset); 214 215 return 0; 216 } 217 218 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 219 u8 *data) 220 { 221 u8 *p = (char *)data; 222 223 if (strset == ETH_SS_STATS) 224 p = hclgevf_tqps_get_strings(handle, p); 225 } 226 227 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 228 { 229 hclgevf_tqps_get_stats(handle, data); 230 } 231 232 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 233 { 234 u8 resp_msg; 235 int status; 236 237 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 238 true, &resp_msg, sizeof(resp_msg)); 239 if (status) { 240 dev_err(&hdev->pdev->dev, 241 "VF request to get TC info from PF failed %d", 242 status); 243 return 
status; 244 } 245 246 hdev->hw_tc_map = resp_msg; 247 248 return 0; 249 } 250 251 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 252 { 253 struct hnae3_handle *nic = &hdev->nic; 254 u8 resp_msg; 255 int ret; 256 257 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 258 HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, 259 NULL, 0, true, &resp_msg, sizeof(u8)); 260 if (ret) { 261 dev_err(&hdev->pdev->dev, 262 "VF request to get port based vlan state failed %d", 263 ret); 264 return ret; 265 } 266 267 nic->port_base_vlan_state = resp_msg; 268 269 return 0; 270 } 271 272 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 273 { 274 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 275 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 276 int status; 277 278 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 279 true, resp_msg, 280 HCLGEVF_TQPS_RSS_INFO_LEN); 281 if (status) { 282 dev_err(&hdev->pdev->dev, 283 "VF request to get tqp info from PF failed %d", 284 status); 285 return status; 286 } 287 288 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 289 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 290 memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16)); 291 292 return 0; 293 } 294 295 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 296 { 297 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 298 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 299 int ret; 300 301 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0, 302 true, resp_msg, 303 HCLGEVF_TQPS_DEPTH_INFO_LEN); 304 if (ret) { 305 dev_err(&hdev->pdev->dev, 306 "VF request to get tqp depth info from PF failed %d", 307 ret); 308 return ret; 309 } 310 311 memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16)); 312 memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16)); 313 314 return 0; 315 } 316 317 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 318 { 319 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 320 u8 msg_data[2], resp_data[2]; 321 u16 qid_in_pf = 0; 322 int ret; 323 324 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 325 326 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, 327 sizeof(msg_data), true, resp_data, 328 sizeof(resp_data)); 329 if (!ret) 330 qid_in_pf = *(u16 *)resp_data; 331 332 return qid_in_pf; 333 } 334 335 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 336 { 337 u8 resp_msg[2]; 338 int ret; 339 340 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0, 341 true, resp_msg, sizeof(resp_msg)); 342 if (ret) { 343 dev_err(&hdev->pdev->dev, 344 "VF request to get the pf port media type failed %d", 345 ret); 346 return ret; 347 } 348 349 hdev->hw.mac.media_type = resp_msg[0]; 350 hdev->hw.mac.module_type = resp_msg[1]; 351 352 return 0; 353 } 354 355 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 356 { 357 struct hclgevf_tqp *tqp; 358 int i; 359 360 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 361 sizeof(struct hclgevf_tqp), GFP_KERNEL); 362 if (!hdev->htqp) 363 return -ENOMEM; 364 365 tqp = hdev->htqp; 366 367 for (i = 0; i < hdev->num_tqps; i++) { 368 tqp->dev = &hdev->pdev->dev; 369 tqp->index = i; 370 371 tqp->q.ae_algo = &ae_algovf; 372 tqp->q.buf_size = hdev->rx_buf_len; 373 tqp->q.tx_desc_num = hdev->num_tx_desc; 374 tqp->q.rx_desc_num = hdev->num_rx_desc; 375 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 376 i * HCLGEVF_TQP_REG_SIZE; 377 378 tqp++; 379 } 380 381 return 0; 382 } 383 384 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 
385 { 386 struct hnae3_handle *nic = &hdev->nic; 387 struct hnae3_knic_private_info *kinfo; 388 u16 new_tqps = hdev->num_tqps; 389 unsigned int i; 390 391 kinfo = &nic->kinfo; 392 kinfo->num_tc = 0; 393 kinfo->num_tx_desc = hdev->num_tx_desc; 394 kinfo->num_rx_desc = hdev->num_rx_desc; 395 kinfo->rx_buf_len = hdev->rx_buf_len; 396 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 397 if (hdev->hw_tc_map & BIT(i)) 398 kinfo->num_tc++; 399 400 kinfo->rss_size 401 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 402 new_tqps = kinfo->rss_size * kinfo->num_tc; 403 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 404 405 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 406 sizeof(struct hnae3_queue *), GFP_KERNEL); 407 if (!kinfo->tqp) 408 return -ENOMEM; 409 410 for (i = 0; i < kinfo->num_tqps; i++) { 411 hdev->htqp[i].q.handle = &hdev->nic; 412 hdev->htqp[i].q.tqp_index = i; 413 kinfo->tqp[i] = &hdev->htqp[i].q; 414 } 415 416 /* after init the max rss_size and tqps, adjust the default tqp numbers 417 * and rss size with the actual vector numbers 418 */ 419 kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 420 kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc, 421 kinfo->rss_size); 422 423 return 0; 424 } 425 426 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 427 { 428 int status; 429 u8 resp_msg; 430 431 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 432 0, false, &resp_msg, sizeof(resp_msg)); 433 if (status) 434 dev_err(&hdev->pdev->dev, 435 "VF failed to fetch link status(%d) from PF", status); 436 } 437 438 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 439 { 440 struct hnae3_handle *rhandle = &hdev->roce; 441 struct hnae3_handle *handle = &hdev->nic; 442 struct hnae3_client *rclient; 443 struct hnae3_client *client; 444 445 if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 446 return; 447 448 client = handle->client; 449 rclient = hdev->roce_client; 450 451 link_state = 452 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; 453 454 if (link_state != hdev->hw.mac.link) { 455 client->ops->link_status_change(handle, !!link_state); 456 if (rclient && rclient->ops->link_status_change) 457 rclient->ops->link_status_change(rhandle, !!link_state); 458 hdev->hw.mac.link = link_state; 459 } 460 461 clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 462 } 463 464 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 465 { 466 #define HCLGEVF_ADVERTISING 0 467 #define HCLGEVF_SUPPORTED 1 468 u8 send_msg; 469 u8 resp_msg; 470 471 send_msg = HCLGEVF_ADVERTISING; 472 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, 473 &send_msg, sizeof(send_msg), false, 474 &resp_msg, sizeof(resp_msg)); 475 send_msg = HCLGEVF_SUPPORTED; 476 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, 477 &send_msg, sizeof(send_msg), false, 478 &resp_msg, sizeof(resp_msg)); 479 } 480 481 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 482 { 483 struct hnae3_handle *nic = &hdev->nic; 484 int ret; 485 486 nic->ae_algo = &ae_algovf; 487 nic->pdev = hdev->pdev; 488 nic->numa_node_mask = hdev->numa_node_mask; 489 nic->flags |= HNAE3_SUPPORT_VF; 490 491 ret = hclgevf_knic_setup(hdev); 492 if (ret) 493 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 494 ret); 495 return ret; 496 } 497 498 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 499 { 500 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 501 dev_warn(&hdev->pdev->dev, 502 "vector(vector_id %d) has been freed.\n", vector_id); 503 return; 504 } 505 506 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 507 hdev->num_msi_left += 1; 508 hdev->num_msi_used -= 1; 509 } 510 511 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 512 struct hnae3_vector_info *vector_info) 513 { 514 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 515 struct hnae3_vector_info *vector = vector_info; 516 int alloc = 0; 517 int i, j; 518 519 vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 520 vector_num = min(hdev->num_msi_left, vector_num); 521 522 for (j = 0; j < vector_num; j++) { 523 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 524 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 525 vector->vector = pci_irq_vector(hdev->pdev, i); 526 vector->io_addr = hdev->hw.io_base + 527 HCLGEVF_VECTOR_REG_BASE + 528 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 529 hdev->vector_status[i] = 0; 530 hdev->vector_irq[i] = vector->vector; 531 532 vector++; 533 alloc++; 534 535 break; 536 } 537 } 538 } 539 hdev->num_msi_left -= alloc; 540 hdev->num_msi_used += alloc; 541 542 return alloc; 543 } 544 545 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 546 { 547 int i; 548 549 for (i = 0; i < hdev->num_msi; i++) 550 if (vector == hdev->vector_irq[i]) 551 return i; 552 553 return -EINVAL; 554 } 555 556 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 557 const u8 hfunc, const u8 *key) 558 { 559 struct hclgevf_rss_config_cmd *req; 560 unsigned int key_offset = 0; 561 struct hclgevf_desc desc; 562 int key_counts; 563 int key_size; 564 int ret; 565 566 key_counts = HCLGEVF_RSS_KEY_SIZE; 567 req = (struct hclgevf_rss_config_cmd *)desc.data; 568 569 while (key_counts) { 570 hclgevf_cmd_setup_basic_desc(&desc, 571 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 572 false); 573 574 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 575 req->hash_config |= 576 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 577 578 key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); 
579 memcpy(req->hash_key, 580 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 581 582 key_counts -= key_size; 583 key_offset++; 584 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 585 if (ret) { 586 dev_err(&hdev->pdev->dev, 587 "Configure RSS config fail, status = %d\n", 588 ret); 589 return ret; 590 } 591 } 592 593 return 0; 594 } 595 596 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 597 { 598 return HCLGEVF_RSS_KEY_SIZE; 599 } 600 601 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 602 { 603 return HCLGEVF_RSS_IND_TBL_SIZE; 604 } 605 606 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 607 { 608 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 609 struct hclgevf_rss_indirection_table_cmd *req; 610 struct hclgevf_desc desc; 611 int status; 612 int i, j; 613 614 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 615 616 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 617 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 618 false); 619 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 620 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 621 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 622 req->rss_result[j] = 623 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 624 625 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 626 if (status) { 627 dev_err(&hdev->pdev->dev, 628 "VF failed(=%d) to set RSS indirection table\n", 629 status); 630 return status; 631 } 632 } 633 634 return 0; 635 } 636 637 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 638 { 639 struct hclgevf_rss_tc_mode_cmd *req; 640 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 641 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 642 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 643 struct hclgevf_desc desc; 644 u16 roundup_size; 645 int status; 646 unsigned int i; 647 648 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 649 650 roundup_size = roundup_pow_of_two(rss_size); 651 roundup_size = ilog2(roundup_size); 652 653 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 654 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 655 tc_size[i] = roundup_size; 656 tc_offset[i] = rss_size * i; 657 } 658 659 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 660 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 661 hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 662 (tc_valid[i] & 0x1)); 663 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 664 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 665 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 666 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 667 } 668 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 669 if (status) 670 dev_err(&hdev->pdev->dev, 671 "VF failed(=%d) to set rss tc mode\n", status); 672 673 return status; 674 } 675 676 /* for revision 0x20, vf shared the same rss config with pf */ 677 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 678 { 679 #define HCLGEVF_RSS_MBX_RESP_LEN 8 680 681 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 682 u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 683 u16 msg_num, hash_key_index; 684 u8 index; 685 int ret; 686 687 msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 688 HCLGEVF_RSS_MBX_RESP_LEN; 689 for (index = 0; index < msg_num; index++) { 690 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0, 691 &index, sizeof(index), 692 true, resp_msg, 693 HCLGEVF_RSS_MBX_RESP_LEN); 694 if (ret) { 695 dev_err(&hdev->pdev->dev, 696 "VF get rss hash key from PF failed, ret=%d", 697 ret); 698 return ret; 699 } 700 701 hash_key_index = 
HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with the user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ?
HCLGEVF_S_PORT_BIT : 0; 807 808 if (nfc->data & RXH_L4_B_2_3) 809 hash_sets |= HCLGEVF_D_PORT_BIT; 810 else 811 hash_sets &= ~HCLGEVF_D_PORT_BIT; 812 813 if (nfc->data & RXH_IP_SRC) 814 hash_sets |= HCLGEVF_S_IP_BIT; 815 else 816 hash_sets &= ~HCLGEVF_S_IP_BIT; 817 818 if (nfc->data & RXH_IP_DST) 819 hash_sets |= HCLGEVF_D_IP_BIT; 820 else 821 hash_sets &= ~HCLGEVF_D_IP_BIT; 822 823 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 824 hash_sets |= HCLGEVF_V_TAG_BIT; 825 826 return hash_sets; 827 } 828 829 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 830 struct ethtool_rxnfc *nfc) 831 { 832 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 833 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 834 struct hclgevf_rss_input_tuple_cmd *req; 835 struct hclgevf_desc desc; 836 u8 tuple_sets; 837 int ret; 838 839 if (handle->pdev->revision == 0x20) 840 return -EOPNOTSUPP; 841 842 if (nfc->data & 843 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 844 return -EINVAL; 845 846 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 847 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 848 849 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 850 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 851 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 852 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 853 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 854 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 855 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 856 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 857 858 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 859 switch (nfc->flow_type) { 860 case TCP_V4_FLOW: 861 req->ipv4_tcp_en = tuple_sets; 862 break; 863 case TCP_V6_FLOW: 864 req->ipv6_tcp_en = tuple_sets; 865 break; 866 case UDP_V4_FLOW: 867 req->ipv4_udp_en = tuple_sets; 868 break; 869 case UDP_V6_FLOW: 870 req->ipv6_udp_en = tuple_sets; 871 break; 872 case SCTP_V4_FLOW: 873 req->ipv4_sctp_en = tuple_sets; 874 break; 875 case SCTP_V6_FLOW: 876 if ((nfc->data & RXH_L4_B_0_1) || 877 (nfc->data & RXH_L4_B_2_3)) 878 return -EINVAL; 879 880 req->ipv6_sctp_en = tuple_sets; 881 break; 882 case IPV4_FLOW: 883 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 884 break; 885 case IPV6_FLOW: 886 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 887 break; 888 default: 889 return -EINVAL; 890 } 891 892 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 893 if (ret) { 894 dev_err(&hdev->pdev->dev, 895 "Set rss tuple fail, status = %d\n", ret); 896 return ret; 897 } 898 899 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 900 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 901 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 902 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 903 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 904 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 905 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 906 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 907 return 0; 908 } 909 910 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 911 struct ethtool_rxnfc *nfc) 912 { 913 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 914 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 915 u8 tuple_sets; 916 917 if (handle->pdev->revision == 0x20) 918 return -EOPNOTSUPP; 919 920 nfc->data = 0; 921 922 switch (nfc->flow_type) { 923 case 
TCP_V4_FLOW: 924 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 925 break; 926 case UDP_V4_FLOW: 927 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 928 break; 929 case TCP_V6_FLOW: 930 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 931 break; 932 case UDP_V6_FLOW: 933 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 934 break; 935 case SCTP_V4_FLOW: 936 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 937 break; 938 case SCTP_V6_FLOW: 939 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 940 break; 941 case IPV4_FLOW: 942 case IPV6_FLOW: 943 tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 944 break; 945 default: 946 return -EINVAL; 947 } 948 949 if (!tuple_sets) 950 return 0; 951 952 if (tuple_sets & HCLGEVF_D_PORT_BIT) 953 nfc->data |= RXH_L4_B_2_3; 954 if (tuple_sets & HCLGEVF_S_PORT_BIT) 955 nfc->data |= RXH_L4_B_0_1; 956 if (tuple_sets & HCLGEVF_D_IP_BIT) 957 nfc->data |= RXH_IP_DST; 958 if (tuple_sets & HCLGEVF_S_IP_BIT) 959 nfc->data |= RXH_IP_SRC; 960 961 return 0; 962 } 963 964 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 965 struct hclgevf_rss_cfg *rss_cfg) 966 { 967 struct hclgevf_rss_input_tuple_cmd *req; 968 struct hclgevf_desc desc; 969 int ret; 970 971 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 972 973 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 974 975 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 976 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 977 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 978 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 979 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 980 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 981 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 982 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 983 984 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 985 if (ret) 986 dev_err(&hdev->pdev->dev, 987 "Configure rss input fail, status = %d\n", ret); 988 return ret; 989 } 990 991 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 992 { 993 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 994 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 995 996 return rss_cfg->rss_size; 997 } 998 999 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 1000 int vector_id, 1001 struct hnae3_ring_chain_node *ring_chain) 1002 { 1003 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1004 struct hnae3_ring_chain_node *node; 1005 struct hclge_mbx_vf_to_pf_cmd *req; 1006 struct hclgevf_desc desc; 1007 int i = 0; 1008 int status; 1009 u8 type; 1010 1011 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 1012 type = en ? 
HCLGE_MBX_MAP_RING_TO_VECTOR : 1013 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 1014 1015 for (node = ring_chain; node; node = node->next) { 1016 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1017 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 1018 1019 if (i == 0) { 1020 hclgevf_cmd_setup_basic_desc(&desc, 1021 HCLGEVF_OPC_MBX_VF_TO_PF, 1022 false); 1023 req->msg[0] = type; 1024 req->msg[1] = vector_id; 1025 } 1026 1027 req->msg[idx_offset] = 1028 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 1029 req->msg[idx_offset + 1] = node->tqp_index; 1030 req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, 1031 HNAE3_RING_GL_IDX_M, 1032 HNAE3_RING_GL_IDX_S); 1033 1034 i++; 1035 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 1036 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 1037 HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 1038 !node->next) { 1039 req->msg[2] = i; 1040 1041 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1042 if (status) { 1043 dev_err(&hdev->pdev->dev, 1044 "Map TQP fail, status is %d.\n", 1045 status); 1046 return status; 1047 } 1048 i = 0; 1049 hclgevf_cmd_setup_basic_desc(&desc, 1050 HCLGEVF_OPC_MBX_VF_TO_PF, 1051 false); 1052 req->msg[0] = type; 1053 req->msg[1] = vector_id; 1054 } 1055 } 1056 1057 return 0; 1058 } 1059 1060 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1061 struct hnae3_ring_chain_node *ring_chain) 1062 { 1063 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1064 int vector_id; 1065 1066 vector_id = hclgevf_get_vector_index(hdev, vector); 1067 if (vector_id < 0) { 1068 dev_err(&handle->pdev->dev, 1069 "Get vector index fail. ret =%d\n", vector_id); 1070 return vector_id; 1071 } 1072 1073 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1074 } 1075 1076 static int hclgevf_unmap_ring_from_vector( 1077 struct hnae3_handle *handle, 1078 int vector, 1079 struct hnae3_ring_chain_node *ring_chain) 1080 { 1081 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1082 int ret, vector_id; 1083 1084 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1085 return 0; 1086 1087 vector_id = hclgevf_get_vector_index(hdev, vector); 1088 if (vector_id < 0) { 1089 dev_err(&handle->pdev->dev, 1090 "Get vector index fail. ret =%d\n", vector_id); 1091 return vector_id; 1092 } 1093 1094 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 1095 if (ret) 1096 dev_err(&handle->pdev->dev, 1097 "Unmap ring from vector fail. vector=%d, ret =%d\n", 1098 vector_id, 1099 ret); 1100 1101 return ret; 1102 } 1103 1104 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 1105 { 1106 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1107 int vector_id; 1108 1109 vector_id = hclgevf_get_vector_index(hdev, vector); 1110 if (vector_id < 0) { 1111 dev_err(&handle->pdev->dev, 1112 "hclgevf_put_vector get vector index fail. ret =%d\n", 1113 vector_id); 1114 return vector_id; 1115 } 1116 1117 hclgevf_free_vector(hdev, vector_id); 1118 1119 return 0; 1120 } 1121 1122 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1123 bool en_uc_pmc, bool en_mc_pmc, 1124 bool en_bc_pmc) 1125 { 1126 struct hclge_mbx_vf_to_pf_cmd *req; 1127 struct hclgevf_desc desc; 1128 int ret; 1129 1130 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 1131 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); 1132 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; 1133 req->msg[1] = en_bc_pmc ? 1 : 0; 1134 req->msg[2] = en_uc_pmc ? 1 : 0; 1135 req->msg[3] = en_mc_pmc ? 
1 : 0; 1136 1137 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1138 if (ret) 1139 dev_err(&hdev->pdev->dev, 1140 "Set promisc mode fail, status is %d.\n", ret); 1141 1142 return ret; 1143 } 1144 1145 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 1146 bool en_mc_pmc) 1147 { 1148 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1149 struct pci_dev *pdev = hdev->pdev; 1150 bool en_bc_pmc; 1151 1152 en_bc_pmc = pdev->revision != 0x20; 1153 1154 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 1155 en_bc_pmc); 1156 } 1157 1158 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, 1159 int stream_id, bool enable) 1160 { 1161 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1162 struct hclgevf_desc desc; 1163 int status; 1164 1165 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1166 1167 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1168 false); 1169 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1170 req->stream_id = cpu_to_le16(stream_id); 1171 if (enable) 1172 req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 1173 1174 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1175 if (status) 1176 dev_err(&hdev->pdev->dev, 1177 "TQP enable fail, status =%d.\n", status); 1178 1179 return status; 1180 } 1181 1182 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1183 { 1184 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1185 struct hclgevf_tqp *tqp; 1186 int i; 1187 1188 for (i = 0; i < kinfo->num_tqps; i++) { 1189 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1190 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1191 } 1192 } 1193 1194 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 1195 { 1196 u8 host_mac[ETH_ALEN]; 1197 int status; 1198 1199 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MAC_ADDR, 0, NULL, 0, 1200 true, host_mac, ETH_ALEN); 1201 if (status) { 1202 dev_err(&hdev->pdev->dev, 1203 "fail to get VF MAC from host %d", status); 1204 return status; 1205 } 1206 1207 ether_addr_copy(p, host_mac); 1208 1209 return 0; 1210 } 1211 1212 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1213 { 1214 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1215 u8 host_mac_addr[ETH_ALEN]; 1216 1217 if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 1218 return; 1219 1220 hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 1221 if (hdev->has_pf_mac) 1222 ether_addr_copy(p, host_mac_addr); 1223 else 1224 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1225 } 1226 1227 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1228 bool is_first) 1229 { 1230 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1231 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1232 u8 *new_mac_addr = (u8 *)p; 1233 u8 msg_data[ETH_ALEN * 2]; 1234 u16 subcode; 1235 int status; 1236 1237 ether_addr_copy(msg_data, new_mac_addr); 1238 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); 1239 1240 subcode = is_first ? 
HCLGE_MBX_MAC_VLAN_UC_ADD : 1241 HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1242 1243 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1244 subcode, msg_data, sizeof(msg_data), 1245 true, NULL, 0); 1246 if (!status) 1247 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1248 1249 return status; 1250 } 1251 1252 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1253 const unsigned char *addr) 1254 { 1255 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1256 1257 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1258 HCLGE_MBX_MAC_VLAN_UC_ADD, 1259 addr, ETH_ALEN, false, NULL, 0); 1260 } 1261 1262 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1263 const unsigned char *addr) 1264 { 1265 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1266 1267 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1268 HCLGE_MBX_MAC_VLAN_UC_REMOVE, 1269 addr, ETH_ALEN, false, NULL, 0); 1270 } 1271 1272 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1273 const unsigned char *addr) 1274 { 1275 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1276 1277 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1278 HCLGE_MBX_MAC_VLAN_MC_ADD, 1279 addr, ETH_ALEN, false, NULL, 0); 1280 } 1281 1282 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1283 const unsigned char *addr) 1284 { 1285 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1286 1287 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1288 HCLGE_MBX_MAC_VLAN_MC_REMOVE, 1289 addr, ETH_ALEN, false, NULL, 0); 1290 } 1291 1292 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1293 __be16 proto, u16 vlan_id, 1294 bool is_kill) 1295 { 1296 #define HCLGEVF_VLAN_MBX_MSG_LEN 5 1297 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1298 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; 1299 int ret; 1300 1301 if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1302 return -EINVAL; 1303 1304 if (proto != htons(ETH_P_8021Q)) 1305 return -EPROTONOSUPPORT; 1306 1307 /* When device is resetting, firmware is unable to handle 1308 * mailbox. Just record the vlan id, and remove it after 1309 * reset finished. 1310 */ 1311 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) { 1312 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1313 return -EBUSY; 1314 } 1315 1316 msg_data[0] = is_kill; 1317 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 1318 memcpy(&msg_data[3], &proto, sizeof(proto)); 1319 /* when remove hw vlan filter failed, record the vlan id, 1320 * and try to remove it from hw later, to be consistence 1321 * with stack. 
	 */
	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				   HCLGE_MBX_VLAN_FILTER, msg_data,
				   HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(msg_data, &queue_id, sizeof(queue_id));

	/* disable the vf queue before sending the queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    sizeof(msg_data), true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

1443 /* we will wait a bit more to let reset of the stack to complete. This 1444 * might happen in case reset assertion was made by PF. Yes, this also 1445 * means we might end up waiting bit more even for VF reset. 1446 */ 1447 msleep(5000); 1448 1449 return 0; 1450 } 1451 1452 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 1453 { 1454 u32 reg_val; 1455 1456 reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); 1457 if (enable) 1458 reg_val |= HCLGEVF_NIC_SW_RST_RDY; 1459 else 1460 reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 1461 1462 hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 1463 reg_val); 1464 } 1465 1466 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1467 { 1468 int ret; 1469 1470 /* uninitialize the nic client */ 1471 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1472 if (ret) 1473 return ret; 1474 1475 /* re-initialize the hclge device */ 1476 ret = hclgevf_reset_hdev(hdev); 1477 if (ret) { 1478 dev_err(&hdev->pdev->dev, 1479 "hclge device re-init failed, VF is disabled!\n"); 1480 return ret; 1481 } 1482 1483 /* bring up the nic client again */ 1484 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1485 if (ret) 1486 return ret; 1487 1488 ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); 1489 if (ret) 1490 return ret; 1491 1492 /* clear handshake status with IMP */ 1493 hclgevf_reset_handshake(hdev, false); 1494 1495 /* bring up the nic to enable TX/RX again */ 1496 return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1497 } 1498 1499 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1500 { 1501 #define HCLGEVF_RESET_SYNC_TIME 100 1502 1503 int ret = 0; 1504 1505 if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1506 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 1507 0, true, NULL, sizeof(u8)); 1508 hdev->rst_stats.vf_func_rst_cnt++; 1509 } 1510 1511 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1512 /* inform hardware that preparatory work is done */ 1513 msleep(HCLGEVF_RESET_SYNC_TIME); 1514 hclgevf_reset_handshake(hdev, true); 1515 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", 1516 hdev->reset_type, ret); 1517 1518 return ret; 1519 } 1520 1521 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 1522 { 1523 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 1524 hdev->rst_stats.vf_func_rst_cnt); 1525 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1526 hdev->rst_stats.flr_rst_cnt); 1527 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 1528 hdev->rst_stats.vf_rst_cnt); 1529 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1530 hdev->rst_stats.rst_done_cnt); 1531 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1532 hdev->rst_stats.hw_rst_done_cnt); 1533 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1534 hdev->rst_stats.rst_cnt); 1535 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1536 hdev->rst_stats.rst_fail_cnt); 1537 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1538 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 1539 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 1540 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG)); 1541 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1542 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); 1543 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1544 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 1545 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1546 } 1547 1548 static void 
hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1549 { 1550 /* recover handshake status with IMP when reset fail */ 1551 hclgevf_reset_handshake(hdev, true); 1552 hdev->rst_stats.rst_fail_cnt++; 1553 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1554 hdev->rst_stats.rst_fail_cnt); 1555 1556 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1557 set_bit(hdev->reset_type, &hdev->reset_pending); 1558 1559 if (hclgevf_is_reset_pending(hdev)) { 1560 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1561 hclgevf_reset_task_schedule(hdev); 1562 } else { 1563 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1564 hclgevf_dump_rst_info(hdev); 1565 } 1566 } 1567 1568 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 1569 { 1570 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1571 int ret; 1572 1573 /* Initialize ae_dev reset status as well, in case enet layer wants to 1574 * know if device is undergoing reset 1575 */ 1576 ae_dev->reset_type = hdev->reset_type; 1577 hdev->rst_stats.rst_cnt++; 1578 1579 rtnl_lock(); 1580 /* bring down the nic to stop any ongoing TX/RX */ 1581 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1582 rtnl_unlock(); 1583 if (ret) 1584 return ret; 1585 1586 return hclgevf_reset_prepare_wait(hdev); 1587 } 1588 1589 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 1590 { 1591 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1592 int ret; 1593 1594 hdev->rst_stats.hw_rst_done_cnt++; 1595 1596 rtnl_lock(); 1597 /* now, re-initialize the nic client and ae device */ 1598 ret = hclgevf_reset_stack(hdev); 1599 rtnl_unlock(); 1600 if (ret) { 1601 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1602 return ret; 1603 } 1604 1605 hdev->last_reset_time = jiffies; 1606 ae_dev->reset_type = HNAE3_NONE_RESET; 1607 hdev->rst_stats.rst_done_cnt++; 1608 hdev->rst_stats.rst_fail_cnt = 0; 1609 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1610 1611 return 0; 1612 } 1613 1614 static void hclgevf_reset(struct hclgevf_dev *hdev) 1615 { 1616 if (hclgevf_reset_prepare(hdev)) 1617 goto err_reset; 1618 1619 /* check if VF could successfully fetch the hardware reset completion 1620 * status from the hardware 1621 */ 1622 if (hclgevf_reset_wait(hdev)) { 1623 /* can't do much in this situation, will disable VF */ 1624 dev_err(&hdev->pdev->dev, 1625 "failed to fetch H/W reset completion status\n"); 1626 goto err_reset; 1627 } 1628 1629 if (hclgevf_reset_rebuild(hdev)) 1630 goto err_reset; 1631 1632 return; 1633 1634 err_reset: 1635 hclgevf_reset_err_handle(hdev); 1636 } 1637 1638 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1639 unsigned long *addr) 1640 { 1641 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1642 1643 /* return the highest priority reset level amongst all */ 1644 if (test_bit(HNAE3_VF_RESET, addr)) { 1645 rst_level = HNAE3_VF_RESET; 1646 clear_bit(HNAE3_VF_RESET, addr); 1647 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1648 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1649 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1650 rst_level = HNAE3_VF_FULL_RESET; 1651 clear_bit(HNAE3_VF_FULL_RESET, addr); 1652 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1653 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1654 rst_level = HNAE3_VF_PF_FUNC_RESET; 1655 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1656 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1657 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1658 rst_level = HNAE3_VF_FUNC_RESET; 1659 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1660 } else if 
(test_bit(HNAE3_FLR_RESET, addr)) { 1661 rst_level = HNAE3_FLR_RESET; 1662 clear_bit(HNAE3_FLR_RESET, addr); 1663 } 1664 1665 return rst_level; 1666 } 1667 1668 static void hclgevf_reset_event(struct pci_dev *pdev, 1669 struct hnae3_handle *handle) 1670 { 1671 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1672 struct hclgevf_dev *hdev = ae_dev->priv; 1673 1674 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 1675 1676 if (hdev->default_reset_request) 1677 hdev->reset_level = 1678 hclgevf_get_reset_level(hdev, 1679 &hdev->default_reset_request); 1680 else 1681 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1682 1683 /* reset of this VF requested */ 1684 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1685 hclgevf_reset_task_schedule(hdev); 1686 1687 hdev->last_reset_time = jiffies; 1688 } 1689 1690 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1691 enum hnae3_reset_type rst_type) 1692 { 1693 struct hclgevf_dev *hdev = ae_dev->priv; 1694 1695 set_bit(rst_type, &hdev->default_reset_request); 1696 } 1697 1698 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1699 { 1700 writel(en ? 1 : 0, vector->addr); 1701 } 1702 1703 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 1704 { 1705 #define HCLGEVF_FLR_RETRY_WAIT_MS 500 1706 #define HCLGEVF_FLR_RETRY_CNT 5 1707 1708 struct hclgevf_dev *hdev = ae_dev->priv; 1709 int retry_cnt = 0; 1710 int ret; 1711 1712 retry: 1713 down(&hdev->reset_sem); 1714 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1715 hdev->reset_type = HNAE3_FLR_RESET; 1716 ret = hclgevf_reset_prepare(hdev); 1717 if (ret) { 1718 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", 1719 ret); 1720 if (hdev->reset_pending || 1721 retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) { 1722 dev_err(&hdev->pdev->dev, 1723 "reset_pending:0x%lx, retry_cnt:%d\n", 1724 hdev->reset_pending, retry_cnt); 1725 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1726 up(&hdev->reset_sem); 1727 msleep(HCLGEVF_FLR_RETRY_WAIT_MS); 1728 goto retry; 1729 } 1730 } 1731 1732 /* disable misc vector before FLR done */ 1733 hclgevf_enable_vector(&hdev->misc_vector, false); 1734 hdev->rst_stats.flr_rst_cnt++; 1735 } 1736 1737 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 1738 { 1739 struct hclgevf_dev *hdev = ae_dev->priv; 1740 int ret; 1741 1742 hclgevf_enable_vector(&hdev->misc_vector, true); 1743 1744 ret = hclgevf_reset_rebuild(hdev); 1745 if (ret) 1746 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 1747 ret); 1748 1749 hdev->reset_type = HNAE3_NONE_RESET; 1750 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1751 up(&hdev->reset_sem); 1752 } 1753 1754 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1755 { 1756 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1757 1758 return hdev->fw_version; 1759 } 1760 1761 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1762 { 1763 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1764 1765 vector->vector_irq = pci_irq_vector(hdev->pdev, 1766 HCLGEVF_MISC_VECTOR_NUM); 1767 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1768 /* vector status always valid for Vector 0 */ 1769 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1770 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1771 1772 hdev->num_msi_left -= 1; 1773 hdev->num_msi_used += 1; 1774 } 1775 1776 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1777 { 1778 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 1779 
!test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}

static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of PF for this reset request.
		 * PF will ack the 1b and 2. cases but we will not get any
		 * intimation about 1a from PF as cmdq would be in unreliable
		 * state i.e. mailbox communication between PF and VF would be
		 * broken.
		 *
		 * if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try to
		 * reset our PCIe + stack and see if it alleviates the problem.
1848 */ 1849 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 1850 /* prepare for full reset of stack + pcie interface */ 1851 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 1852 1853 /* "defer" schedule the reset task again */ 1854 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1855 } else { 1856 hdev->reset_attempts++; 1857 1858 set_bit(hdev->reset_level, &hdev->reset_pending); 1859 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1860 } 1861 hclgevf_reset_task_schedule(hdev); 1862 } 1863 1864 hdev->reset_type = HNAE3_NONE_RESET; 1865 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1866 up(&hdev->reset_sem); 1867 } 1868 1869 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 1870 { 1871 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 1872 return; 1873 1874 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1875 return; 1876 1877 hclgevf_mbx_async_handler(hdev); 1878 1879 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1880 } 1881 1882 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 1883 { 1884 u8 respmsg; 1885 int ret; 1886 1887 if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) 1888 return; 1889 1890 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, 1891 0, false, &respmsg, sizeof(respmsg)); 1892 if (ret) 1893 dev_err(&hdev->pdev->dev, 1894 "VF sends keep alive cmd failed(=%d)\n", ret); 1895 } 1896 1897 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 1898 { 1899 unsigned long delta = round_jiffies_relative(HZ); 1900 struct hnae3_handle *handle = &hdev->nic; 1901 1902 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 1903 delta = jiffies - hdev->last_serv_processed; 1904 1905 if (delta < round_jiffies_relative(HZ)) { 1906 delta = round_jiffies_relative(HZ) - delta; 1907 goto out; 1908 } 1909 } 1910 1911 hdev->serv_processed_cnt++; 1912 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 1913 hclgevf_keep_alive(hdev); 1914 1915 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 1916 hdev->last_serv_processed = jiffies; 1917 goto out; 1918 } 1919 1920 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 1921 hclgevf_tqps_update_stats(handle); 1922 1923 /* request the link status from the PF. 
	 * The PF may be able to push such updates to the VF in the future,
	 * so we might remove this later.
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclgevf_task_schedule(hdev, delta);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
						service_task.work);

	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
	hclgevf_periodic_service_task(hdev);

	/* Handle reset and mbx again in case the periodic task delays their
	 * handling by calling hclgevf_task_schedule() in
	 * hclgevf_periodic_service_task()
	 */
	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 val, cmdq_stat_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
					 HCLGEVF_VECTOR0_CMDQ_STAT_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
		hdev->rst_stats.vf_rst_cnt++;
		/* set up the VF hardware reset status; the PF will clear
		 * this status once its initialization is done.
		 */
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
				  val | HCLGEVF_VF_RST_ING_BIT);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox (=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		/* for revision 0x21, an interrupt is cleared by writing 0 to
		 * its bit in the clear register; writing 1 keeps the old
		 * value.
		 * for revision 0x20, the clear register is a read & write
		 * register, so we should write 0 only to the bit we are
		 * handling and keep the other bits the same as cmdq_stat_reg.
1995 */ 1996 if (hdev->pdev->revision >= 0x21) 1997 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 1998 else 1999 *clearval = cmdq_stat_reg & 2000 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2001 2002 return HCLGEVF_VECTOR0_EVENT_MBX; 2003 } 2004 2005 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); 2006 2007 return HCLGEVF_VECTOR0_EVENT_OTHER; 2008 } 2009 2010 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2011 { 2012 enum hclgevf_evt_cause event_cause; 2013 struct hclgevf_dev *hdev = data; 2014 u32 clearval; 2015 2016 hclgevf_enable_vector(&hdev->misc_vector, false); 2017 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2018 2019 switch (event_cause) { 2020 case HCLGEVF_VECTOR0_EVENT_RST: 2021 hclgevf_reset_task_schedule(hdev); 2022 break; 2023 case HCLGEVF_VECTOR0_EVENT_MBX: 2024 hclgevf_mbx_handler(hdev); 2025 break; 2026 default: 2027 break; 2028 } 2029 2030 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 2031 hclgevf_clear_event_cause(hdev, clearval); 2032 hclgevf_enable_vector(&hdev->misc_vector, true); 2033 } 2034 2035 return IRQ_HANDLED; 2036 } 2037 2038 static int hclgevf_configure(struct hclgevf_dev *hdev) 2039 { 2040 int ret; 2041 2042 /* get current port based vlan state from PF */ 2043 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2044 if (ret) 2045 return ret; 2046 2047 /* get queue configuration from PF */ 2048 ret = hclgevf_get_queue_info(hdev); 2049 if (ret) 2050 return ret; 2051 2052 /* get queue depth info from PF */ 2053 ret = hclgevf_get_queue_depth(hdev); 2054 if (ret) 2055 return ret; 2056 2057 ret = hclgevf_get_pf_media_type(hdev); 2058 if (ret) 2059 return ret; 2060 2061 /* get tc configuration from PF */ 2062 return hclgevf_get_tc_info(hdev); 2063 } 2064 2065 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2066 { 2067 struct pci_dev *pdev = ae_dev->pdev; 2068 struct hclgevf_dev *hdev; 2069 2070 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2071 if (!hdev) 2072 return -ENOMEM; 2073 2074 hdev->pdev = pdev; 2075 hdev->ae_dev = ae_dev; 2076 ae_dev->priv = hdev; 2077 2078 return 0; 2079 } 2080 2081 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2082 { 2083 struct hnae3_handle *roce = &hdev->roce; 2084 struct hnae3_handle *nic = &hdev->nic; 2085 2086 roce->rinfo.num_vectors = hdev->num_roce_msix; 2087 2088 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2089 hdev->num_msi_left == 0) 2090 return -EINVAL; 2091 2092 roce->rinfo.base_vector = hdev->roce_base_vector; 2093 2094 roce->rinfo.netdev = nic->kinfo.netdev; 2095 roce->rinfo.roce_io_base = hdev->hw.io_base; 2096 2097 roce->pdev = nic->pdev; 2098 roce->ae_algo = nic->ae_algo; 2099 roce->numa_node_mask = nic->numa_node_mask; 2100 2101 return 0; 2102 } 2103 2104 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 2105 { 2106 struct hclgevf_cfg_gro_status_cmd *req; 2107 struct hclgevf_desc desc; 2108 int ret; 2109 2110 if (!hnae3_dev_gro_supported(hdev)) 2111 return 0; 2112 2113 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2114 false); 2115 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2116 2117 req->gro_en = cpu_to_le16(en ? 
1 : 0); 2118 2119 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2120 if (ret) 2121 dev_err(&hdev->pdev->dev, 2122 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2123 2124 return ret; 2125 } 2126 2127 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2128 { 2129 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2130 int ret; 2131 u32 i; 2132 2133 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2134 2135 if (hdev->pdev->revision >= 0x21) { 2136 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2137 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2138 HCLGEVF_RSS_KEY_SIZE); 2139 2140 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 2141 rss_cfg->rss_hash_key); 2142 if (ret) 2143 return ret; 2144 2145 rss_cfg->rss_tuple_sets.ipv4_tcp_en = 2146 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2147 rss_cfg->rss_tuple_sets.ipv4_udp_en = 2148 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2149 rss_cfg->rss_tuple_sets.ipv4_sctp_en = 2150 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2151 rss_cfg->rss_tuple_sets.ipv4_fragment_en = 2152 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2153 rss_cfg->rss_tuple_sets.ipv6_tcp_en = 2154 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2155 rss_cfg->rss_tuple_sets.ipv6_udp_en = 2156 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2157 rss_cfg->rss_tuple_sets.ipv6_sctp_en = 2158 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2159 rss_cfg->rss_tuple_sets.ipv6_fragment_en = 2160 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2161 2162 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2163 if (ret) 2164 return ret; 2165 } 2166 2167 /* Initialize RSS indirect table */ 2168 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 2169 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2170 2171 ret = hclgevf_set_rss_indir_table(hdev); 2172 if (ret) 2173 return ret; 2174 2175 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2176 } 2177 2178 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2179 { 2180 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2181 false); 2182 } 2183 2184 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2185 { 2186 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2187 2188 unsigned long last = hdev->serv_processed_cnt; 2189 int i = 0; 2190 2191 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2192 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2193 last == hdev->serv_processed_cnt) 2194 usleep_range(1, 1); 2195 } 2196 2197 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2198 { 2199 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2200 2201 if (enable) { 2202 hclgevf_task_schedule(hdev, 0); 2203 } else { 2204 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2205 2206 /* flush memory to make sure DOWN is seen by service task */ 2207 smp_mb__before_atomic(); 2208 hclgevf_flush_link_update(hdev); 2209 } 2210 } 2211 2212 static int hclgevf_ae_start(struct hnae3_handle *handle) 2213 { 2214 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2215 2216 hclgevf_reset_tqp_stats(handle); 2217 2218 hclgevf_request_link_info(hdev); 2219 2220 hclgevf_update_link_mode(hdev); 2221 2222 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2223 2224 return 0; 2225 } 2226 2227 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2228 { 2229 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2230 int i; 2231 2232 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2233 2234 if (hdev->reset_type != HNAE3_VF_RESET) 2235 for (i = 0; i < handle->kinfo.num_tqps; i++) 2236 if (hclgevf_reset_tqp(handle, i)) 2237 break; 2238 2239 hclgevf_reset_tqp_stats(handle); 2240 hclgevf_update_link_status(hdev, 0); 2241 } 
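/* Notify the PF whether this VF is alive. The state is carried as a single
 * byte (1 = alive, 0 = not alive) in an HCLGE_MBX_SET_ALIVE mailbox message;
 * no response from the PF is requested.
 */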
2242 2243 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2244 { 2245 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2246 u8 msg_data; 2247 2248 msg_data = alive ? 1 : 0; 2249 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE, 2250 0, &msg_data, 1, false, NULL, 0); 2251 } 2252 2253 static int hclgevf_client_start(struct hnae3_handle *handle) 2254 { 2255 int ret; 2256 2257 ret = hclgevf_set_alive(handle, true); 2258 if (ret) 2259 return ret; 2260 2261 return 0; 2262 } 2263 2264 static void hclgevf_client_stop(struct hnae3_handle *handle) 2265 { 2266 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2267 int ret; 2268 2269 ret = hclgevf_set_alive(handle, false); 2270 if (ret) 2271 dev_warn(&hdev->pdev->dev, 2272 "%s failed %d\n", __func__, ret); 2273 } 2274 2275 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2276 { 2277 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2278 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2279 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2280 2281 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2282 2283 mutex_init(&hdev->mbx_resp.mbx_mutex); 2284 sema_init(&hdev->reset_sem, 1); 2285 2286 /* bring the device down */ 2287 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2288 } 2289 2290 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2291 { 2292 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2293 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2294 2295 if (hdev->service_task.work.func) 2296 cancel_delayed_work_sync(&hdev->service_task); 2297 2298 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2299 } 2300 2301 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2302 { 2303 struct pci_dev *pdev = hdev->pdev; 2304 int vectors; 2305 int i; 2306 2307 if (hnae3_dev_roce_supported(hdev)) 2308 vectors = pci_alloc_irq_vectors(pdev, 2309 hdev->roce_base_msix_offset + 1, 2310 hdev->num_msi, 2311 PCI_IRQ_MSIX); 2312 else 2313 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2314 hdev->num_msi, 2315 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2316 2317 if (vectors < 0) { 2318 dev_err(&pdev->dev, 2319 "failed(%d) to allocate MSI/MSI-X vectors\n", 2320 vectors); 2321 return vectors; 2322 } 2323 if (vectors < hdev->num_msi) 2324 dev_warn(&hdev->pdev->dev, 2325 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2326 hdev->num_msi, vectors); 2327 2328 hdev->num_msi = vectors; 2329 hdev->num_msi_left = vectors; 2330 2331 hdev->base_msi_vector = pdev->irq; 2332 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2333 2334 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2335 sizeof(u16), GFP_KERNEL); 2336 if (!hdev->vector_status) { 2337 pci_free_irq_vectors(pdev); 2338 return -ENOMEM; 2339 } 2340 2341 for (i = 0; i < hdev->num_msi; i++) 2342 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2343 2344 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2345 sizeof(int), GFP_KERNEL); 2346 if (!hdev->vector_irq) { 2347 devm_kfree(&pdev->dev, hdev->vector_status); 2348 pci_free_irq_vectors(pdev); 2349 return -ENOMEM; 2350 } 2351 2352 return 0; 2353 } 2354 2355 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2356 { 2357 struct pci_dev *pdev = hdev->pdev; 2358 2359 devm_kfree(&pdev->dev, hdev->vector_status); 2360 devm_kfree(&pdev->dev, hdev->vector_irq); 2361 pci_free_irq_vectors(pdev); 2362 } 2363 2364 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2365 { 2366 int ret; 2367 2368 hclgevf_get_misc_vector(hdev); 2369 2370 snprintf(hdev->misc_vector.name, 
HNAE3_INT_NAME_LEN, "%s-misc-%s", 2371 HCLGEVF_NAME, pci_name(hdev->pdev)); 2372 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2373 0, hdev->misc_vector.name, hdev); 2374 if (ret) { 2375 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2376 hdev->misc_vector.vector_irq); 2377 return ret; 2378 } 2379 2380 hclgevf_clear_event_cause(hdev, 0); 2381 2382 /* enable misc. vector(vector 0) */ 2383 hclgevf_enable_vector(&hdev->misc_vector, true); 2384 2385 return ret; 2386 } 2387 2388 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2389 { 2390 /* disable misc vector(vector 0) */ 2391 hclgevf_enable_vector(&hdev->misc_vector, false); 2392 synchronize_irq(hdev->misc_vector.vector_irq); 2393 free_irq(hdev->misc_vector.vector_irq, hdev); 2394 hclgevf_free_vector(hdev, 0); 2395 } 2396 2397 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2398 { 2399 struct device *dev = &hdev->pdev->dev; 2400 2401 dev_info(dev, "VF info begin:\n"); 2402 2403 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2404 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2405 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2406 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2407 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2408 dev_info(dev, "PF media type of this VF: %u\n", 2409 hdev->hw.mac.media_type); 2410 2411 dev_info(dev, "VF info end.\n"); 2412 } 2413 2414 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2415 struct hnae3_client *client) 2416 { 2417 struct hclgevf_dev *hdev = ae_dev->priv; 2418 int ret; 2419 2420 ret = client->ops->init_instance(&hdev->nic); 2421 if (ret) 2422 return ret; 2423 2424 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2425 hnae3_set_client_init_flag(client, ae_dev, 1); 2426 2427 if (netif_msg_drv(&hdev->nic)) 2428 hclgevf_info_show(hdev); 2429 2430 return 0; 2431 } 2432 2433 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2434 struct hnae3_client *client) 2435 { 2436 struct hclgevf_dev *hdev = ae_dev->priv; 2437 int ret; 2438 2439 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2440 !hdev->nic_client) 2441 return 0; 2442 2443 ret = hclgevf_init_roce_base_info(hdev); 2444 if (ret) 2445 return ret; 2446 2447 ret = client->ops->init_instance(&hdev->roce); 2448 if (ret) 2449 return ret; 2450 2451 hnae3_set_client_init_flag(client, ae_dev, 1); 2452 2453 return 0; 2454 } 2455 2456 static int hclgevf_init_client_instance(struct hnae3_client *client, 2457 struct hnae3_ae_dev *ae_dev) 2458 { 2459 struct hclgevf_dev *hdev = ae_dev->priv; 2460 int ret; 2461 2462 switch (client->type) { 2463 case HNAE3_CLIENT_KNIC: 2464 hdev->nic_client = client; 2465 hdev->nic.client = client; 2466 2467 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2468 if (ret) 2469 goto clear_nic; 2470 2471 ret = hclgevf_init_roce_client_instance(ae_dev, 2472 hdev->roce_client); 2473 if (ret) 2474 goto clear_roce; 2475 2476 break; 2477 case HNAE3_CLIENT_ROCE: 2478 if (hnae3_dev_roce_supported(hdev)) { 2479 hdev->roce_client = client; 2480 hdev->roce.client = client; 2481 } 2482 2483 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2484 if (ret) 2485 goto clear_roce; 2486 2487 break; 2488 default: 2489 return -EINVAL; 2490 } 2491 2492 return 0; 2493 2494 clear_nic: 2495 hdev->nic_client = NULL; 2496 hdev->nic.client = NULL; 2497 return ret; 2498 clear_roce: 2499 hdev->roce_client = NULL; 2500 hdev->roce.client = NULL; 
2501 return ret; 2502 } 2503 2504 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2505 struct hnae3_ae_dev *ae_dev) 2506 { 2507 struct hclgevf_dev *hdev = ae_dev->priv; 2508 2509 /* un-init roce, if it exists */ 2510 if (hdev->roce_client) { 2511 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2512 hdev->roce_client = NULL; 2513 hdev->roce.client = NULL; 2514 } 2515 2516 /* un-init nic/unic, if this was not called by roce client */ 2517 if (client->ops->uninit_instance && hdev->nic_client && 2518 client->type != HNAE3_CLIENT_ROCE) { 2519 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2520 2521 client->ops->uninit_instance(&hdev->nic, 0); 2522 hdev->nic_client = NULL; 2523 hdev->nic.client = NULL; 2524 } 2525 } 2526 2527 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2528 { 2529 struct pci_dev *pdev = hdev->pdev; 2530 struct hclgevf_hw *hw; 2531 int ret; 2532 2533 ret = pci_enable_device(pdev); 2534 if (ret) { 2535 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2536 return ret; 2537 } 2538 2539 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2540 if (ret) { 2541 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2542 goto err_disable_device; 2543 } 2544 2545 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2546 if (ret) { 2547 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2548 goto err_disable_device; 2549 } 2550 2551 pci_set_master(pdev); 2552 hw = &hdev->hw; 2553 hw->hdev = hdev; 2554 hw->io_base = pci_iomap(pdev, 2, 0); 2555 if (!hw->io_base) { 2556 dev_err(&pdev->dev, "can't map configuration register space\n"); 2557 ret = -ENOMEM; 2558 goto err_clr_master; 2559 } 2560 2561 return 0; 2562 2563 err_clr_master: 2564 pci_clear_master(pdev); 2565 pci_release_regions(pdev); 2566 err_disable_device: 2567 pci_disable_device(pdev); 2568 2569 return ret; 2570 } 2571 2572 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2573 { 2574 struct pci_dev *pdev = hdev->pdev; 2575 2576 pci_iounmap(pdev, hdev->hw.io_base); 2577 pci_clear_master(pdev); 2578 pci_release_regions(pdev); 2579 pci_disable_device(pdev); 2580 } 2581 2582 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2583 { 2584 struct hclgevf_query_res_cmd *req; 2585 struct hclgevf_desc desc; 2586 int ret; 2587 2588 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 2589 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2590 if (ret) { 2591 dev_err(&hdev->pdev->dev, 2592 "query vf resource failed, ret = %d.\n", ret); 2593 return ret; 2594 } 2595 2596 req = (struct hclgevf_query_res_cmd *)desc.data; 2597 2598 if (hnae3_dev_roce_supported(hdev)) { 2599 hdev->roce_base_msix_offset = 2600 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 2601 HCLGEVF_MSIX_OFT_ROCEE_M, 2602 HCLGEVF_MSIX_OFT_ROCEE_S); 2603 hdev->num_roce_msix = 2604 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 2605 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2606 2607 /* nic's msix numbers is always equals to the roce's. */ 2608 hdev->num_nic_msix = hdev->num_roce_msix; 2609 2610 /* VF should have NIC vectors and Roce vectors, NIC vectors 2611 * are queued before Roce vectors. The offset is fixed to 64. 
2612 */ 2613 hdev->num_msi = hdev->num_roce_msix + 2614 hdev->roce_base_msix_offset; 2615 } else { 2616 hdev->num_msi = 2617 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 2618 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2619 2620 hdev->num_nic_msix = hdev->num_msi; 2621 } 2622 2623 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 2624 dev_err(&hdev->pdev->dev, 2625 "Just %u msi resources, not enough for vf(min:2).\n", 2626 hdev->num_nic_msix); 2627 return -EINVAL; 2628 } 2629 2630 return 0; 2631 } 2632 2633 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2634 { 2635 struct pci_dev *pdev = hdev->pdev; 2636 int ret = 0; 2637 2638 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2639 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2640 hclgevf_misc_irq_uninit(hdev); 2641 hclgevf_uninit_msi(hdev); 2642 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2643 } 2644 2645 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2646 pci_set_master(pdev); 2647 ret = hclgevf_init_msi(hdev); 2648 if (ret) { 2649 dev_err(&pdev->dev, 2650 "failed(%d) to init MSI/MSI-X\n", ret); 2651 return ret; 2652 } 2653 2654 ret = hclgevf_misc_irq_init(hdev); 2655 if (ret) { 2656 hclgevf_uninit_msi(hdev); 2657 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2658 ret); 2659 return ret; 2660 } 2661 2662 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2663 } 2664 2665 return ret; 2666 } 2667 2668 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2669 { 2670 struct pci_dev *pdev = hdev->pdev; 2671 int ret; 2672 2673 ret = hclgevf_pci_reset(hdev); 2674 if (ret) { 2675 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2676 return ret; 2677 } 2678 2679 ret = hclgevf_cmd_init(hdev); 2680 if (ret) { 2681 dev_err(&pdev->dev, "cmd failed %d\n", ret); 2682 return ret; 2683 } 2684 2685 ret = hclgevf_rss_init_hw(hdev); 2686 if (ret) { 2687 dev_err(&hdev->pdev->dev, 2688 "failed(%d) to initialize RSS\n", ret); 2689 return ret; 2690 } 2691 2692 ret = hclgevf_config_gro(hdev, true); 2693 if (ret) 2694 return ret; 2695 2696 ret = hclgevf_init_vlan_config(hdev); 2697 if (ret) { 2698 dev_err(&hdev->pdev->dev, 2699 "failed(%d) to initialize VLAN config\n", ret); 2700 return ret; 2701 } 2702 2703 dev_info(&hdev->pdev->dev, "Reset done\n"); 2704 2705 return 0; 2706 } 2707 2708 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 2709 { 2710 struct pci_dev *pdev = hdev->pdev; 2711 int ret; 2712 2713 ret = hclgevf_pci_init(hdev); 2714 if (ret) 2715 return ret; 2716 2717 ret = hclgevf_cmd_queue_init(hdev); 2718 if (ret) 2719 goto err_cmd_queue_init; 2720 2721 ret = hclgevf_cmd_init(hdev); 2722 if (ret) 2723 goto err_cmd_init; 2724 2725 /* Get vf resource */ 2726 ret = hclgevf_query_vf_resource(hdev); 2727 if (ret) 2728 goto err_cmd_init; 2729 2730 ret = hclgevf_init_msi(hdev); 2731 if (ret) { 2732 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 2733 goto err_cmd_init; 2734 } 2735 2736 hclgevf_state_init(hdev); 2737 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2738 hdev->reset_type = HNAE3_NONE_RESET; 2739 2740 ret = hclgevf_misc_irq_init(hdev); 2741 if (ret) 2742 goto err_misc_irq_init; 2743 2744 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2745 2746 ret = hclgevf_configure(hdev); 2747 if (ret) { 2748 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2749 goto err_config; 2750 } 2751 2752 ret = hclgevf_alloc_tqps(hdev); 2753 if (ret) { 2754 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2755 goto err_config; 2756 } 2757 2758 ret = hclgevf_set_handle_info(hdev); 
2759 if (ret) 2760 goto err_config; 2761 2762 ret = hclgevf_config_gro(hdev, true); 2763 if (ret) 2764 goto err_config; 2765 2766 /* Initialize RSS for this VF */ 2767 ret = hclgevf_rss_init_hw(hdev); 2768 if (ret) { 2769 dev_err(&hdev->pdev->dev, 2770 "failed(%d) to initialize RSS\n", ret); 2771 goto err_config; 2772 } 2773 2774 ret = hclgevf_init_vlan_config(hdev); 2775 if (ret) { 2776 dev_err(&hdev->pdev->dev, 2777 "failed(%d) to initialize VLAN config\n", ret); 2778 goto err_config; 2779 } 2780 2781 hdev->last_reset_time = jiffies; 2782 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 2783 HCLGEVF_DRIVER_NAME); 2784 2785 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 2786 2787 return 0; 2788 2789 err_config: 2790 hclgevf_misc_irq_uninit(hdev); 2791 err_misc_irq_init: 2792 hclgevf_state_uninit(hdev); 2793 hclgevf_uninit_msi(hdev); 2794 err_cmd_init: 2795 hclgevf_cmd_uninit(hdev); 2796 err_cmd_queue_init: 2797 hclgevf_pci_uninit(hdev); 2798 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2799 return ret; 2800 } 2801 2802 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 2803 { 2804 hclgevf_state_uninit(hdev); 2805 2806 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2807 hclgevf_misc_irq_uninit(hdev); 2808 hclgevf_uninit_msi(hdev); 2809 } 2810 2811 hclgevf_pci_uninit(hdev); 2812 hclgevf_cmd_uninit(hdev); 2813 } 2814 2815 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 2816 { 2817 struct pci_dev *pdev = ae_dev->pdev; 2818 int ret; 2819 2820 ret = hclgevf_alloc_hdev(ae_dev); 2821 if (ret) { 2822 dev_err(&pdev->dev, "hclge device allocation failed\n"); 2823 return ret; 2824 } 2825 2826 ret = hclgevf_init_hdev(ae_dev->priv); 2827 if (ret) { 2828 dev_err(&pdev->dev, "hclge device initialization failed\n"); 2829 return ret; 2830 } 2831 2832 return 0; 2833 } 2834 2835 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 2836 { 2837 struct hclgevf_dev *hdev = ae_dev->priv; 2838 2839 hclgevf_uninit_hdev(hdev); 2840 ae_dev->priv = NULL; 2841 } 2842 2843 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 2844 { 2845 struct hnae3_handle *nic = &hdev->nic; 2846 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 2847 2848 return min_t(u32, hdev->rss_size_max, 2849 hdev->num_tqps / kinfo->num_tc); 2850 } 2851 2852 /** 2853 * hclgevf_get_channels - Get the current channels enabled and max supported. 2854 * @handle: hardware information for network interface 2855 * @ch: ethtool channels structure 2856 * 2857 * We don't support separate tx and rx queues as channels. The other count 2858 * represents how many queues are being used for control. max_combined counts 2859 * how many queue pairs we can support. They may not be mapped 1 to 1 with 2860 * q_vectors since we support a lot more queue pairs than q_vectors. 
2861 **/ 2862 static void hclgevf_get_channels(struct hnae3_handle *handle, 2863 struct ethtool_channels *ch) 2864 { 2865 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2866 2867 ch->max_combined = hclgevf_get_max_channels(hdev); 2868 ch->other_count = 0; 2869 ch->max_other = 0; 2870 ch->combined_count = handle->kinfo.rss_size; 2871 } 2872 2873 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 2874 u16 *alloc_tqps, u16 *max_rss_size) 2875 { 2876 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2877 2878 *alloc_tqps = hdev->num_tqps; 2879 *max_rss_size = hdev->rss_size_max; 2880 } 2881 2882 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 2883 u32 new_tqps_num) 2884 { 2885 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 2886 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2887 u16 max_rss_size; 2888 2889 kinfo->req_rss_size = new_tqps_num; 2890 2891 max_rss_size = min_t(u16, hdev->rss_size_max, 2892 hdev->num_tqps / kinfo->num_tc); 2893 2894 /* Use the user's configuration when it is not larger than 2895 * max_rss_size, otherwise, use the maximum specification value. 2896 */ 2897 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 2898 kinfo->req_rss_size <= max_rss_size) 2899 kinfo->rss_size = kinfo->req_rss_size; 2900 else if (kinfo->rss_size > max_rss_size || 2901 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 2902 kinfo->rss_size = max_rss_size; 2903 2904 kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; 2905 } 2906 2907 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 2908 bool rxfh_configured) 2909 { 2910 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2911 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 2912 u16 cur_rss_size = kinfo->rss_size; 2913 u16 cur_tqps = kinfo->num_tqps; 2914 u32 *rss_indir; 2915 unsigned int i; 2916 int ret; 2917 2918 hclgevf_update_rss_size(handle, new_tqps_num); 2919 2920 ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 2921 if (ret) 2922 return ret; 2923 2924 /* RSS indirection table has been configuared by user */ 2925 if (rxfh_configured) 2926 goto out; 2927 2928 /* Reinitializes the rss indirect table according to the new RSS size */ 2929 rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 2930 if (!rss_indir) 2931 return -ENOMEM; 2932 2933 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 2934 rss_indir[i] = i % kinfo->rss_size; 2935 2936 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 2937 if (ret) 2938 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 2939 ret); 2940 2941 kfree(rss_indir); 2942 2943 out: 2944 if (!ret) 2945 dev_info(&hdev->pdev->dev, 2946 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 2947 cur_rss_size, kinfo->rss_size, 2948 cur_tqps, kinfo->rss_size * kinfo->num_tc); 2949 2950 return ret; 2951 } 2952 2953 static int hclgevf_get_status(struct hnae3_handle *handle) 2954 { 2955 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2956 2957 return hdev->hw.mac.link; 2958 } 2959 2960 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 2961 u8 *auto_neg, u32 *speed, 2962 u8 *duplex) 2963 { 2964 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2965 2966 if (speed) 2967 *speed = hdev->hw.mac.speed; 2968 if (duplex) 2969 *duplex = hdev->hw.mac.duplex; 2970 if (auto_neg) 2971 *auto_neg = AUTONEG_DISABLE; 2972 } 2973 2974 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 2975 u8 duplex) 2976 { 
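	/* Cache the latest speed/duplex in hdev->hw.mac so that
	 * hclgevf_get_ksettings_an_result() reports the current values.
	 */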
2977 hdev->hw.mac.speed = speed; 2978 hdev->hw.mac.duplex = duplex; 2979 } 2980 2981 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 2982 { 2983 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2984 2985 return hclgevf_config_gro(hdev, enable); 2986 } 2987 2988 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 2989 u8 *module_type) 2990 { 2991 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2992 2993 if (media_type) 2994 *media_type = hdev->hw.mac.media_type; 2995 2996 if (module_type) 2997 *module_type = hdev->hw.mac.module_type; 2998 } 2999 3000 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3001 { 3002 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3003 3004 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3005 } 3006 3007 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3008 { 3009 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3010 3011 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3012 } 3013 3014 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3015 { 3016 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3017 3018 return hdev->rst_stats.hw_rst_done_cnt; 3019 } 3020 3021 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3022 unsigned long *supported, 3023 unsigned long *advertising) 3024 { 3025 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3026 3027 *supported = hdev->hw.mac.supported; 3028 *advertising = hdev->hw.mac.advertising; 3029 } 3030 3031 #define MAX_SEPARATE_NUM 4 3032 #define SEPARATOR_VALUE 0xFFFFFFFF 3033 #define REG_NUM_PER_LINE 4 3034 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 3035 3036 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 3037 { 3038 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 3039 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3040 3041 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 3042 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 3043 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 3044 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 3045 3046 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 3047 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 3048 } 3049 3050 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 3051 void *data) 3052 { 3053 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3054 int i, j, reg_um, separator_num; 3055 u32 *reg = data; 3056 3057 *version = hdev->fw_version; 3058 3059 /* fetching per-VF registers values from VF PCIe register space */ 3060 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 3061 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3062 for (i = 0; i < reg_um; i++) 3063 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 3064 for (i = 0; i < separator_num; i++) 3065 *reg++ = SEPARATOR_VALUE; 3066 3067 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 3068 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3069 for (i = 0; i < reg_um; i++) 3070 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 3071 for (i = 0; i < separator_num; i++) 3072 *reg++ = SEPARATOR_VALUE; 3073 3074 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 3075 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3076 for (j = 0; j < hdev->num_tqps; j++) { 3077 for (i = 0; i < reg_um; i++) 3078 *reg++ = 
hclgevf_read_dev(&hdev->hw, 3079 ring_reg_addr_list[i] + 3080 0x200 * j); 3081 for (i = 0; i < separator_num; i++) 3082 *reg++ = SEPARATOR_VALUE; 3083 } 3084 3085 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 3086 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3087 for (j = 0; j < hdev->num_msi_used - 1; j++) { 3088 for (i = 0; i < reg_um; i++) 3089 *reg++ = hclgevf_read_dev(&hdev->hw, 3090 tqp_intr_reg_addr_list[i] + 3091 4 * j); 3092 for (i = 0; i < separator_num; i++) 3093 *reg++ = SEPARATOR_VALUE; 3094 } 3095 } 3096 3097 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 3098 u8 *port_base_vlan_info, u8 data_size) 3099 { 3100 struct hnae3_handle *nic = &hdev->nic; 3101 3102 rtnl_lock(); 3103 hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3104 rtnl_unlock(); 3105 3106 /* send msg to PF and wait update port based vlan info */ 3107 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 3108 HCLGE_MBX_PORT_BASE_VLAN_CFG, 3109 port_base_vlan_info, data_size, 3110 false, NULL, 0); 3111 3112 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3113 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; 3114 else 3115 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3116 3117 rtnl_lock(); 3118 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 3119 rtnl_unlock(); 3120 } 3121 3122 static const struct hnae3_ae_ops hclgevf_ops = { 3123 .init_ae_dev = hclgevf_init_ae_dev, 3124 .uninit_ae_dev = hclgevf_uninit_ae_dev, 3125 .flr_prepare = hclgevf_flr_prepare, 3126 .flr_done = hclgevf_flr_done, 3127 .init_client_instance = hclgevf_init_client_instance, 3128 .uninit_client_instance = hclgevf_uninit_client_instance, 3129 .start = hclgevf_ae_start, 3130 .stop = hclgevf_ae_stop, 3131 .client_start = hclgevf_client_start, 3132 .client_stop = hclgevf_client_stop, 3133 .map_ring_to_vector = hclgevf_map_ring_to_vector, 3134 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3135 .get_vector = hclgevf_get_vector, 3136 .put_vector = hclgevf_put_vector, 3137 .reset_queue = hclgevf_reset_tqp, 3138 .get_mac_addr = hclgevf_get_mac_addr, 3139 .set_mac_addr = hclgevf_set_mac_addr, 3140 .add_uc_addr = hclgevf_add_uc_addr, 3141 .rm_uc_addr = hclgevf_rm_uc_addr, 3142 .add_mc_addr = hclgevf_add_mc_addr, 3143 .rm_mc_addr = hclgevf_rm_mc_addr, 3144 .get_stats = hclgevf_get_stats, 3145 .update_stats = hclgevf_update_stats, 3146 .get_strings = hclgevf_get_strings, 3147 .get_sset_count = hclgevf_get_sset_count, 3148 .get_rss_key_size = hclgevf_get_rss_key_size, 3149 .get_rss_indir_size = hclgevf_get_rss_indir_size, 3150 .get_rss = hclgevf_get_rss, 3151 .set_rss = hclgevf_set_rss, 3152 .get_rss_tuple = hclgevf_get_rss_tuple, 3153 .set_rss_tuple = hclgevf_set_rss_tuple, 3154 .get_tc_size = hclgevf_get_tc_size, 3155 .get_fw_version = hclgevf_get_fw_version, 3156 .set_vlan_filter = hclgevf_set_vlan_filter, 3157 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3158 .reset_event = hclgevf_reset_event, 3159 .set_default_reset_request = hclgevf_set_def_reset_request, 3160 .set_channels = hclgevf_set_channels, 3161 .get_channels = hclgevf_get_channels, 3162 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3163 .get_regs_len = hclgevf_get_regs_len, 3164 .get_regs = hclgevf_get_regs, 3165 .get_status = hclgevf_get_status, 3166 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3167 .get_media_type = hclgevf_get_media_type, 3168 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 3169 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3170 .ae_dev_reset_cnt = 
hclgevf_ae_dev_reset_cnt, 3171 .set_gro_en = hclgevf_gro_en, 3172 .set_mtu = hclgevf_set_mtu, 3173 .get_global_queue_id = hclgevf_get_qid_global, 3174 .set_timer_task = hclgevf_set_timer_task, 3175 .get_link_mode = hclgevf_get_link_mode, 3176 .set_promisc_mode = hclgevf_set_promisc_mode, 3177 }; 3178 3179 static struct hnae3_ae_algo ae_algovf = { 3180 .ops = &hclgevf_ops, 3181 .pdev_id_table = ae_algovf_pci_tbl, 3182 }; 3183 3184 static int hclgevf_init(void) 3185 { 3186 pr_info("%s is initializing\n", HCLGEVF_NAME); 3187 3188 hclgevf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGEVF_NAME); 3189 if (!hclgevf_wq) { 3190 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3191 return -ENOMEM; 3192 } 3193 3194 hnae3_register_ae_algo(&ae_algovf); 3195 3196 return 0; 3197 } 3198 3199 static void hclgevf_exit(void) 3200 { 3201 hnae3_unregister_ae_algo(&ae_algovf); 3202 destroy_workqueue(hclgevf_wq); 3203 } 3204 module_init(hclgevf_init); 3205 module_exit(hclgevf_exit); 3206 3207 MODULE_LICENSE("GPL"); 3208 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3209 MODULE_DESCRIPTION("HCLGEVF Driver"); 3210 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3211