1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 #define HCLGEVF_RESET_MAX_FAIL_CNT 5 15 16 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 17 static struct hnae3_ae_algo ae_algovf; 18 19 static const struct pci_device_id ae_algovf_pci_tbl[] = { 20 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 21 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 22 /* required last entry */ 23 {0, } 24 }; 25 26 static const u8 hclgevf_hash_key[] = { 27 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 28 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 29 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 30 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 31 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 32 }; 33 34 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 35 36 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 37 HCLGEVF_CMDQ_TX_ADDR_H_REG, 38 HCLGEVF_CMDQ_TX_DEPTH_REG, 39 HCLGEVF_CMDQ_TX_TAIL_REG, 40 HCLGEVF_CMDQ_TX_HEAD_REG, 41 HCLGEVF_CMDQ_RX_ADDR_L_REG, 42 HCLGEVF_CMDQ_RX_ADDR_H_REG, 43 HCLGEVF_CMDQ_RX_DEPTH_REG, 44 HCLGEVF_CMDQ_RX_TAIL_REG, 45 HCLGEVF_CMDQ_RX_HEAD_REG, 46 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 47 HCLGEVF_CMDQ_INTR_STS_REG, 48 HCLGEVF_CMDQ_INTR_EN_REG, 49 HCLGEVF_CMDQ_INTR_GEN_REG}; 50 51 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 52 HCLGEVF_RST_ING, 53 HCLGEVF_GRO_EN_REG}; 54 55 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 56 HCLGEVF_RING_RX_ADDR_H_REG, 57 HCLGEVF_RING_RX_BD_NUM_REG, 58 HCLGEVF_RING_RX_BD_LENGTH_REG, 59 HCLGEVF_RING_RX_MERGE_EN_REG, 60 HCLGEVF_RING_RX_TAIL_REG, 61 HCLGEVF_RING_RX_HEAD_REG, 62 HCLGEVF_RING_RX_FBD_NUM_REG, 63 HCLGEVF_RING_RX_OFFSET_REG, 64 HCLGEVF_RING_RX_FBD_OFFSET_REG, 65 HCLGEVF_RING_RX_STASH_REG, 66 HCLGEVF_RING_RX_BD_ERR_REG, 67 HCLGEVF_RING_TX_ADDR_L_REG, 68 HCLGEVF_RING_TX_ADDR_H_REG, 69 HCLGEVF_RING_TX_BD_NUM_REG, 70 HCLGEVF_RING_TX_PRIORITY_REG, 71 HCLGEVF_RING_TX_TC_REG, 72 HCLGEVF_RING_TX_MERGE_EN_REG, 73 HCLGEVF_RING_TX_TAIL_REG, 74 HCLGEVF_RING_TX_HEAD_REG, 75 HCLGEVF_RING_TX_FBD_NUM_REG, 76 HCLGEVF_RING_TX_OFFSET_REG, 77 HCLGEVF_RING_TX_EBD_NUM_REG, 78 HCLGEVF_RING_TX_EBD_OFFSET_REG, 79 HCLGEVF_RING_TX_BD_ERR_REG, 80 HCLGEVF_RING_EN_REG}; 81 82 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 83 HCLGEVF_TQP_INTR_GL0_REG, 84 HCLGEVF_TQP_INTR_GL1_REG, 85 HCLGEVF_TQP_INTR_GL2_REG, 86 HCLGEVF_TQP_INTR_RL_REG}; 87 88 static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 89 { 90 if (!handle->client) 91 return container_of(handle, struct hclgevf_dev, nic); 92 else if (handle->client->type == HNAE3_CLIENT_ROCE) 93 return container_of(handle, struct hclgevf_dev, roce); 94 else 95 return container_of(handle, struct hclgevf_dev, nic); 96 } 97 98 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 99 { 100 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 101 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 102 struct hclgevf_desc desc; 103 struct hclgevf_tqp *tqp; 104 int status; 105 int i; 106 107 for (i = 0; i < kinfo->num_tqps; i++) { 108 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 109 hclgevf_cmd_setup_basic_desc(&desc, 110 HCLGEVF_OPC_QUERY_RX_STATUS, 111 true); 112 113 desc.data[0] = cpu_to_le32(tqp->index 
& 0x1ff); 114 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 115 if (status) { 116 dev_err(&hdev->pdev->dev, 117 "Query tqp stat fail, status = %d,queue = %d\n", 118 status, i); 119 return status; 120 } 121 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 122 le32_to_cpu(desc.data[1]); 123 124 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 125 true); 126 127 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 128 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 129 if (status) { 130 dev_err(&hdev->pdev->dev, 131 "Query tqp stat fail, status = %d,queue = %d\n", 132 status, i); 133 return status; 134 } 135 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 136 le32_to_cpu(desc.data[1]); 137 } 138 139 return 0; 140 } 141 142 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 143 { 144 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 145 struct hclgevf_tqp *tqp; 146 u64 *buff = data; 147 int i; 148 149 for (i = 0; i < kinfo->num_tqps; i++) { 150 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 151 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 152 } 153 for (i = 0; i < kinfo->num_tqps; i++) { 154 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 155 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 156 } 157 158 return buff; 159 } 160 161 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 162 { 163 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 164 165 return kinfo->num_tqps * 2; 166 } 167 168 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 169 { 170 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 171 u8 *buff = data; 172 int i = 0; 173 174 for (i = 0; i < kinfo->num_tqps; i++) { 175 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 176 struct hclgevf_tqp, q); 177 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 178 tqp->index); 179 buff += ETH_GSTRING_LEN; 180 } 181 182 for (i = 0; i < kinfo->num_tqps; i++) { 183 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 184 struct hclgevf_tqp, q); 185 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 186 tqp->index); 187 buff += ETH_GSTRING_LEN; 188 } 189 190 return buff; 191 } 192 193 static void hclgevf_update_stats(struct hnae3_handle *handle, 194 struct net_device_stats *net_stats) 195 { 196 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 197 int status; 198 199 status = hclgevf_tqps_update_stats(handle); 200 if (status) 201 dev_err(&hdev->pdev->dev, 202 "VF update of TQPS stats fail, status = %d.\n", 203 status); 204 } 205 206 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 207 { 208 if (strset == ETH_SS_TEST) 209 return -EOPNOTSUPP; 210 else if (strset == ETH_SS_STATS) 211 return hclgevf_tqps_get_sset_count(handle, strset); 212 213 return 0; 214 } 215 216 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 217 u8 *data) 218 { 219 u8 *p = (char *)data; 220 221 if (strset == ETH_SS_STATS) 222 p = hclgevf_tqps_get_strings(handle, p); 223 } 224 225 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 226 { 227 hclgevf_tqps_get_stats(handle, data); 228 } 229 230 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 231 { 232 u8 resp_msg; 233 int status; 234 235 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 236 true, &resp_msg, sizeof(resp_msg)); 237 if (status) { 238 dev_err(&hdev->pdev->dev, 239 "VF request to get TC info from PF failed %d", 240 status); 241 return status; 242 } 243 244 hdev->hw_tc_map = resp_msg; 245 246 
return 0; 247 } 248 249 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 250 { 251 struct hnae3_handle *nic = &hdev->nic; 252 u8 resp_msg; 253 int ret; 254 255 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 256 HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, 257 NULL, 0, true, &resp_msg, sizeof(u8)); 258 if (ret) { 259 dev_err(&hdev->pdev->dev, 260 "VF request to get port based vlan state failed %d", 261 ret); 262 return ret; 263 } 264 265 nic->port_base_vlan_state = resp_msg; 266 267 return 0; 268 } 269 270 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 271 { 272 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 273 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 274 int status; 275 276 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 277 true, resp_msg, 278 HCLGEVF_TQPS_RSS_INFO_LEN); 279 if (status) { 280 dev_err(&hdev->pdev->dev, 281 "VF request to get tqp info from PF failed %d", 282 status); 283 return status; 284 } 285 286 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 287 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 288 memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16)); 289 290 return 0; 291 } 292 293 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 294 { 295 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 296 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 297 int ret; 298 299 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0, 300 true, resp_msg, 301 HCLGEVF_TQPS_DEPTH_INFO_LEN); 302 if (ret) { 303 dev_err(&hdev->pdev->dev, 304 "VF request to get tqp depth info from PF failed %d", 305 ret); 306 return ret; 307 } 308 309 memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16)); 310 memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16)); 311 312 return 0; 313 } 314 315 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 316 { 317 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 318 u8 msg_data[2], resp_data[2]; 319 u16 qid_in_pf = 0; 320 int ret; 321 322 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 323 324 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, 325 sizeof(msg_data), true, resp_data, 326 sizeof(resp_data)); 327 if (!ret) 328 qid_in_pf = *(u16 *)resp_data; 329 330 return qid_in_pf; 331 } 332 333 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 334 { 335 u8 resp_msg[2]; 336 int ret; 337 338 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0, 339 true, resp_msg, sizeof(resp_msg)); 340 if (ret) { 341 dev_err(&hdev->pdev->dev, 342 "VF request to get the pf port media type failed %d", 343 ret); 344 return ret; 345 } 346 347 hdev->hw.mac.media_type = resp_msg[0]; 348 hdev->hw.mac.module_type = resp_msg[1]; 349 350 return 0; 351 } 352 353 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 354 { 355 struct hclgevf_tqp *tqp; 356 int i; 357 358 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 359 sizeof(struct hclgevf_tqp), GFP_KERNEL); 360 if (!hdev->htqp) 361 return -ENOMEM; 362 363 tqp = hdev->htqp; 364 365 for (i = 0; i < hdev->num_tqps; i++) { 366 tqp->dev = &hdev->pdev->dev; 367 tqp->index = i; 368 369 tqp->q.ae_algo = &ae_algovf; 370 tqp->q.buf_size = hdev->rx_buf_len; 371 tqp->q.tx_desc_num = hdev->num_tx_desc; 372 tqp->q.rx_desc_num = hdev->num_rx_desc; 373 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 374 i * HCLGEVF_TQP_REG_SIZE; 375 376 tqp++; 377 } 378 379 return 0; 380 } 381 382 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 383 { 384 struct hnae3_handle *nic = &hdev->nic; 385 
struct hnae3_knic_private_info *kinfo; 386 u16 new_tqps = hdev->num_tqps; 387 unsigned int i; 388 389 kinfo = &nic->kinfo; 390 kinfo->num_tc = 0; 391 kinfo->num_tx_desc = hdev->num_tx_desc; 392 kinfo->num_rx_desc = hdev->num_rx_desc; 393 kinfo->rx_buf_len = hdev->rx_buf_len; 394 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 395 if (hdev->hw_tc_map & BIT(i)) 396 kinfo->num_tc++; 397 398 kinfo->rss_size 399 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 400 new_tqps = kinfo->rss_size * kinfo->num_tc; 401 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 402 403 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 404 sizeof(struct hnae3_queue *), GFP_KERNEL); 405 if (!kinfo->tqp) 406 return -ENOMEM; 407 408 for (i = 0; i < kinfo->num_tqps; i++) { 409 hdev->htqp[i].q.handle = &hdev->nic; 410 hdev->htqp[i].q.tqp_index = i; 411 kinfo->tqp[i] = &hdev->htqp[i].q; 412 } 413 414 /* after init the max rss_size and tqps, adjust the default tqp numbers 415 * and rss size with the actual vector numbers 416 */ 417 kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 418 kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc, 419 kinfo->rss_size); 420 421 return 0; 422 } 423 424 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 425 { 426 int status; 427 u8 resp_msg; 428 429 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 430 0, false, &resp_msg, sizeof(resp_msg)); 431 if (status) 432 dev_err(&hdev->pdev->dev, 433 "VF failed to fetch link status(%d) from PF", status); 434 } 435 436 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 437 { 438 struct hnae3_handle *rhandle = &hdev->roce; 439 struct hnae3_handle *handle = &hdev->nic; 440 struct hnae3_client *rclient; 441 struct hnae3_client *client; 442 443 client = handle->client; 444 rclient = hdev->roce_client; 445 446 link_state = 447 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; 448 449 if (link_state != hdev->hw.mac.link) { 450 client->ops->link_status_change(handle, !!link_state); 451 if (rclient && rclient->ops->link_status_change) 452 rclient->ops->link_status_change(rhandle, !!link_state); 453 hdev->hw.mac.link = link_state; 454 } 455 } 456 457 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 458 { 459 #define HCLGEVF_ADVERTISING 0 460 #define HCLGEVF_SUPPORTED 1 461 u8 send_msg; 462 u8 resp_msg; 463 464 send_msg = HCLGEVF_ADVERTISING; 465 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, 466 &send_msg, sizeof(send_msg), false, 467 &resp_msg, sizeof(resp_msg)); 468 send_msg = HCLGEVF_SUPPORTED; 469 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, 470 &send_msg, sizeof(send_msg), false, 471 &resp_msg, sizeof(resp_msg)); 472 } 473 474 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 475 { 476 struct hnae3_handle *nic = &hdev->nic; 477 int ret; 478 479 nic->ae_algo = &ae_algovf; 480 nic->pdev = hdev->pdev; 481 nic->numa_node_mask = hdev->numa_node_mask; 482 nic->flags |= HNAE3_SUPPORT_VF; 483 484 ret = hclgevf_knic_setup(hdev); 485 if (ret) 486 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 487 ret); 488 return ret; 489 } 490 491 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 492 { 493 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 494 dev_warn(&hdev->pdev->dev, 495 "vector(vector_id %d) has been freed.\n", vector_id); 496 return; 497 } 498 499 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 500 hdev->num_msi_left += 1; 501 hdev->num_msi_used -= 1; 502 } 503 504 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 505 struct hnae3_vector_info *vector_info) 506 { 507 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 508 struct hnae3_vector_info *vector = vector_info; 509 int alloc = 0; 510 int i, j; 511 512 vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 513 vector_num = min(hdev->num_msi_left, vector_num); 514 515 for (j = 0; j < vector_num; j++) { 516 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 517 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 518 vector->vector = pci_irq_vector(hdev->pdev, i); 519 vector->io_addr = hdev->hw.io_base + 520 HCLGEVF_VECTOR_REG_BASE + 521 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 522 hdev->vector_status[i] = 0; 523 hdev->vector_irq[i] = vector->vector; 524 525 vector++; 526 alloc++; 527 528 break; 529 } 530 } 531 } 532 hdev->num_msi_left -= alloc; 533 hdev->num_msi_used += alloc; 534 535 return alloc; 536 } 537 538 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 539 { 540 int i; 541 542 for (i = 0; i < hdev->num_msi; i++) 543 if (vector == hdev->vector_irq[i]) 544 return i; 545 546 return -EINVAL; 547 } 548 549 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 550 const u8 hfunc, const u8 *key) 551 { 552 struct hclgevf_rss_config_cmd *req; 553 unsigned int key_offset = 0; 554 struct hclgevf_desc desc; 555 int key_counts; 556 int key_size; 557 int ret; 558 559 key_counts = HCLGEVF_RSS_KEY_SIZE; 560 req = (struct hclgevf_rss_config_cmd *)desc.data; 561 562 while (key_counts) { 563 hclgevf_cmd_setup_basic_desc(&desc, 564 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 565 false); 566 567 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 568 req->hash_config |= 569 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 570 571 key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); 572 memcpy(req->hash_key, 573 key + key_offset * 
HCLGEVF_RSS_HASH_KEY_NUM, key_size); 574 575 key_counts -= key_size; 576 key_offset++; 577 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 578 if (ret) { 579 dev_err(&hdev->pdev->dev, 580 "Configure RSS config fail, status = %d\n", 581 ret); 582 return ret; 583 } 584 } 585 586 return 0; 587 } 588 589 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 590 { 591 return HCLGEVF_RSS_KEY_SIZE; 592 } 593 594 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 595 { 596 return HCLGEVF_RSS_IND_TBL_SIZE; 597 } 598 599 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 600 { 601 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 602 struct hclgevf_rss_indirection_table_cmd *req; 603 struct hclgevf_desc desc; 604 int status; 605 int i, j; 606 607 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 608 609 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 610 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 611 false); 612 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 613 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 614 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 615 req->rss_result[j] = 616 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 617 618 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 619 if (status) { 620 dev_err(&hdev->pdev->dev, 621 "VF failed(=%d) to set RSS indirection table\n", 622 status); 623 return status; 624 } 625 } 626 627 return 0; 628 } 629 630 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 631 { 632 struct hclgevf_rss_tc_mode_cmd *req; 633 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 634 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 635 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 636 struct hclgevf_desc desc; 637 u16 roundup_size; 638 int status; 639 unsigned int i; 640 641 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 642 643 roundup_size = roundup_pow_of_two(rss_size); 644 roundup_size = ilog2(roundup_size); 645 646 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 647 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 648 tc_size[i] = roundup_size; 649 tc_offset[i] = rss_size * i; 650 } 651 652 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 653 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 654 hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 655 (tc_valid[i] & 0x1)); 656 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 657 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 658 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 659 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 660 } 661 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 662 if (status) 663 dev_err(&hdev->pdev->dev, 664 "VF failed(=%d) to set rss tc mode\n", status); 665 666 return status; 667 } 668 669 /* for revision 0x20, vf shared the same rss config with pf */ 670 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 671 { 672 #define HCLGEVF_RSS_MBX_RESP_LEN 8 673 674 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 675 u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 676 u16 msg_num, hash_key_index; 677 u8 index; 678 int ret; 679 680 msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 681 HCLGEVF_RSS_MBX_RESP_LEN; 682 for (index = 0; index < msg_num; index++) { 683 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0, 684 &index, sizeof(index), 685 true, resp_msg, 686 HCLGEVF_RSS_MBX_RESP_LEN); 687 if (ret) { 688 dev_err(&hdev->pdev->dev, 689 "VF get rss hash key from PF failed, ret=%d", 690 ret); 691 return ret; 692 } 693 694 hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 695 if (index == 
msg_num - 1) 696 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 697 &resp_msg[0], 698 HCLGEVF_RSS_KEY_SIZE - hash_key_index); 699 else 700 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 701 &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 702 } 703 704 return 0; 705 } 706 707 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 708 u8 *hfunc) 709 { 710 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 711 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 712 int i, ret; 713 714 if (handle->pdev->revision >= 0x21) { 715 /* Get hash algorithm */ 716 if (hfunc) { 717 switch (rss_cfg->hash_algo) { 718 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 719 *hfunc = ETH_RSS_HASH_TOP; 720 break; 721 case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 722 *hfunc = ETH_RSS_HASH_XOR; 723 break; 724 default: 725 *hfunc = ETH_RSS_HASH_UNKNOWN; 726 break; 727 } 728 } 729 730 /* Get the RSS Key required by the user */ 731 if (key) 732 memcpy(key, rss_cfg->rss_hash_key, 733 HCLGEVF_RSS_KEY_SIZE); 734 } else { 735 if (hfunc) 736 *hfunc = ETH_RSS_HASH_TOP; 737 if (key) { 738 ret = hclgevf_get_rss_hash_key(hdev); 739 if (ret) 740 return ret; 741 memcpy(key, rss_cfg->rss_hash_key, 742 HCLGEVF_RSS_KEY_SIZE); 743 } 744 } 745 746 if (indir) 747 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 748 indir[i] = rss_cfg->rss_indirection_tbl[i]; 749 750 return 0; 751 } 752 753 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 754 const u8 *key, const u8 hfunc) 755 { 756 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 757 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 758 int ret, i; 759 760 if (handle->pdev->revision >= 0x21) { 761 /* Set the RSS Hash Key if specififed by the user */ 762 if (key) { 763 switch (hfunc) { 764 case ETH_RSS_HASH_TOP: 765 rss_cfg->hash_algo = 766 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 767 break; 768 case ETH_RSS_HASH_XOR: 769 rss_cfg->hash_algo = 770 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 771 break; 772 case ETH_RSS_HASH_NO_CHANGE: 773 break; 774 default: 775 return -EINVAL; 776 } 777 778 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 779 key); 780 if (ret) 781 return ret; 782 783 /* Update the shadow RSS key with user specified qids */ 784 memcpy(rss_cfg->rss_hash_key, key, 785 HCLGEVF_RSS_KEY_SIZE); 786 } 787 } 788 789 /* update the shadow RSS table with user specified qids */ 790 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 791 rss_cfg->rss_indirection_tbl[i] = indir[i]; 792 793 /* update the hardware */ 794 return hclgevf_set_rss_indir_table(hdev); 795 } 796 797 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 798 { 799 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGEVF_S_PORT_BIT : 0; 800 801 if (nfc->data & RXH_L4_B_2_3) 802 hash_sets |= HCLGEVF_D_PORT_BIT; 803 else 804 hash_sets &= ~HCLGEVF_D_PORT_BIT; 805 806 if (nfc->data & RXH_IP_SRC) 807 hash_sets |= HCLGEVF_S_IP_BIT; 808 else 809 hash_sets &= ~HCLGEVF_S_IP_BIT; 810 811 if (nfc->data & RXH_IP_DST) 812 hash_sets |= HCLGEVF_D_IP_BIT; 813 else 814 hash_sets &= ~HCLGEVF_D_IP_BIT; 815 816 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 817 hash_sets |= HCLGEVF_V_TAG_BIT; 818 819 return hash_sets; 820 } 821 822 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 823 struct ethtool_rxnfc *nfc) 824 { 825 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 826 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 827 struct hclgevf_rss_input_tuple_cmd *req; 828 struct hclgevf_desc desc; 829 u8 tuple_sets; 830 int ret; 831 832 if (handle->pdev->revision == 0x20) 833 return -EOPNOTSUPP; 834 835 if (nfc->data & 836 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 837 return -EINVAL; 838 839 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 840 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 841 842 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 843 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 844 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 845 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 846 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 847 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 848 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 849 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 850 851 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 852 switch (nfc->flow_type) { 853 case TCP_V4_FLOW: 854 req->ipv4_tcp_en = tuple_sets; 855 break; 856 case TCP_V6_FLOW: 857 req->ipv6_tcp_en = tuple_sets; 858 break; 859 case UDP_V4_FLOW: 860 req->ipv4_udp_en = tuple_sets; 861 break; 862 case UDP_V6_FLOW: 863 req->ipv6_udp_en = tuple_sets; 864 break; 865 case SCTP_V4_FLOW: 866 req->ipv4_sctp_en = tuple_sets; 867 break; 868 case SCTP_V6_FLOW: 869 if ((nfc->data & RXH_L4_B_0_1) || 870 (nfc->data & RXH_L4_B_2_3)) 871 return -EINVAL; 872 873 req->ipv6_sctp_en = tuple_sets; 874 break; 875 case IPV4_FLOW: 876 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 877 break; 878 case IPV6_FLOW: 879 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 880 break; 881 default: 882 return -EINVAL; 883 } 884 885 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 886 if (ret) { 887 dev_err(&hdev->pdev->dev, 888 "Set rss tuple fail, status = %d\n", ret); 889 return ret; 890 } 891 892 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 893 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 894 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 895 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 896 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 897 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 898 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 899 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 900 return 0; 901 } 902 903 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 904 struct ethtool_rxnfc *nfc) 905 { 906 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 907 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 908 u8 tuple_sets; 909 910 if (handle->pdev->revision == 0x20) 911 return -EOPNOTSUPP; 912 913 nfc->data = 0; 914 915 switch (nfc->flow_type) { 916 case 
TCP_V4_FLOW: 917 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 918 break; 919 case UDP_V4_FLOW: 920 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 921 break; 922 case TCP_V6_FLOW: 923 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 924 break; 925 case UDP_V6_FLOW: 926 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 927 break; 928 case SCTP_V4_FLOW: 929 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 930 break; 931 case SCTP_V6_FLOW: 932 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 933 break; 934 case IPV4_FLOW: 935 case IPV6_FLOW: 936 tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 937 break; 938 default: 939 return -EINVAL; 940 } 941 942 if (!tuple_sets) 943 return 0; 944 945 if (tuple_sets & HCLGEVF_D_PORT_BIT) 946 nfc->data |= RXH_L4_B_2_3; 947 if (tuple_sets & HCLGEVF_S_PORT_BIT) 948 nfc->data |= RXH_L4_B_0_1; 949 if (tuple_sets & HCLGEVF_D_IP_BIT) 950 nfc->data |= RXH_IP_DST; 951 if (tuple_sets & HCLGEVF_S_IP_BIT) 952 nfc->data |= RXH_IP_SRC; 953 954 return 0; 955 } 956 957 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 958 struct hclgevf_rss_cfg *rss_cfg) 959 { 960 struct hclgevf_rss_input_tuple_cmd *req; 961 struct hclgevf_desc desc; 962 int ret; 963 964 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 965 966 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 967 968 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 969 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 970 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 971 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 972 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 973 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 974 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 975 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 976 977 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 978 if (ret) 979 dev_err(&hdev->pdev->dev, 980 "Configure rss input fail, status = %d\n", ret); 981 return ret; 982 } 983 984 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 985 { 986 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 987 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 988 989 return rss_cfg->rss_size; 990 } 991 992 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 993 int vector_id, 994 struct hnae3_ring_chain_node *ring_chain) 995 { 996 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 997 struct hnae3_ring_chain_node *node; 998 struct hclge_mbx_vf_to_pf_cmd *req; 999 struct hclgevf_desc desc; 1000 int i = 0; 1001 int status; 1002 u8 type; 1003 1004 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 1005 type = en ? 
HCLGE_MBX_MAP_RING_TO_VECTOR : 1006 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 1007 1008 for (node = ring_chain; node; node = node->next) { 1009 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1010 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 1011 1012 if (i == 0) { 1013 hclgevf_cmd_setup_basic_desc(&desc, 1014 HCLGEVF_OPC_MBX_VF_TO_PF, 1015 false); 1016 req->msg[0] = type; 1017 req->msg[1] = vector_id; 1018 } 1019 1020 req->msg[idx_offset] = 1021 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 1022 req->msg[idx_offset + 1] = node->tqp_index; 1023 req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, 1024 HNAE3_RING_GL_IDX_M, 1025 HNAE3_RING_GL_IDX_S); 1026 1027 i++; 1028 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 1029 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 1030 HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 1031 !node->next) { 1032 req->msg[2] = i; 1033 1034 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1035 if (status) { 1036 dev_err(&hdev->pdev->dev, 1037 "Map TQP fail, status is %d.\n", 1038 status); 1039 return status; 1040 } 1041 i = 0; 1042 hclgevf_cmd_setup_basic_desc(&desc, 1043 HCLGEVF_OPC_MBX_VF_TO_PF, 1044 false); 1045 req->msg[0] = type; 1046 req->msg[1] = vector_id; 1047 } 1048 } 1049 1050 return 0; 1051 } 1052 1053 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1054 struct hnae3_ring_chain_node *ring_chain) 1055 { 1056 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1057 int vector_id; 1058 1059 vector_id = hclgevf_get_vector_index(hdev, vector); 1060 if (vector_id < 0) { 1061 dev_err(&handle->pdev->dev, 1062 "Get vector index fail. ret =%d\n", vector_id); 1063 return vector_id; 1064 } 1065 1066 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1067 } 1068 1069 static int hclgevf_unmap_ring_from_vector( 1070 struct hnae3_handle *handle, 1071 int vector, 1072 struct hnae3_ring_chain_node *ring_chain) 1073 { 1074 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1075 int ret, vector_id; 1076 1077 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1078 return 0; 1079 1080 vector_id = hclgevf_get_vector_index(hdev, vector); 1081 if (vector_id < 0) { 1082 dev_err(&handle->pdev->dev, 1083 "Get vector index fail. ret =%d\n", vector_id); 1084 return vector_id; 1085 } 1086 1087 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 1088 if (ret) 1089 dev_err(&handle->pdev->dev, 1090 "Unmap ring from vector fail. vector=%d, ret =%d\n", 1091 vector_id, 1092 ret); 1093 1094 return ret; 1095 } 1096 1097 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 1098 { 1099 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1100 int vector_id; 1101 1102 vector_id = hclgevf_get_vector_index(hdev, vector); 1103 if (vector_id < 0) { 1104 dev_err(&handle->pdev->dev, 1105 "hclgevf_put_vector get vector index fail. ret =%d\n", 1106 vector_id); 1107 return vector_id; 1108 } 1109 1110 hclgevf_free_vector(hdev, vector_id); 1111 1112 return 0; 1113 } 1114 1115 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1116 bool en_uc_pmc, bool en_mc_pmc, 1117 bool en_bc_pmc) 1118 { 1119 struct hclge_mbx_vf_to_pf_cmd *req; 1120 struct hclgevf_desc desc; 1121 int ret; 1122 1123 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 1124 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); 1125 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; 1126 req->msg[1] = en_bc_pmc ? 1 : 0; 1127 req->msg[2] = en_uc_pmc ? 1 : 0; 1128 req->msg[3] = en_mc_pmc ? 
1 : 0; 1129 1130 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1131 if (ret) 1132 dev_err(&hdev->pdev->dev, 1133 "Set promisc mode fail, status is %d.\n", ret); 1134 1135 return ret; 1136 } 1137 1138 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 1139 bool en_mc_pmc) 1140 { 1141 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1142 struct pci_dev *pdev = hdev->pdev; 1143 bool en_bc_pmc; 1144 1145 en_bc_pmc = pdev->revision != 0x20; 1146 1147 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 1148 en_bc_pmc); 1149 } 1150 1151 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, 1152 int stream_id, bool enable) 1153 { 1154 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1155 struct hclgevf_desc desc; 1156 int status; 1157 1158 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1159 1160 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1161 false); 1162 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1163 req->stream_id = cpu_to_le16(stream_id); 1164 if (enable) 1165 req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 1166 1167 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1168 if (status) 1169 dev_err(&hdev->pdev->dev, 1170 "TQP enable fail, status =%d.\n", status); 1171 1172 return status; 1173 } 1174 1175 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1176 { 1177 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1178 struct hclgevf_tqp *tqp; 1179 int i; 1180 1181 for (i = 0; i < kinfo->num_tqps; i++) { 1182 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1183 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1184 } 1185 } 1186 1187 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 1188 { 1189 u8 host_mac[ETH_ALEN]; 1190 int status; 1191 1192 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MAC_ADDR, 0, NULL, 0, 1193 true, host_mac, ETH_ALEN); 1194 if (status) { 1195 dev_err(&hdev->pdev->dev, 1196 "fail to get VF MAC from host %d", status); 1197 return status; 1198 } 1199 1200 ether_addr_copy(p, host_mac); 1201 1202 return 0; 1203 } 1204 1205 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1206 { 1207 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1208 u8 host_mac_addr[ETH_ALEN]; 1209 1210 if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 1211 return; 1212 1213 hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 1214 if (hdev->has_pf_mac) 1215 ether_addr_copy(p, host_mac_addr); 1216 else 1217 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1218 } 1219 1220 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1221 bool is_first) 1222 { 1223 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1224 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1225 u8 *new_mac_addr = (u8 *)p; 1226 u8 msg_data[ETH_ALEN * 2]; 1227 u16 subcode; 1228 int status; 1229 1230 ether_addr_copy(msg_data, new_mac_addr); 1231 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); 1232 1233 subcode = is_first ? 
HCLGE_MBX_MAC_VLAN_UC_ADD : 1234 HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1235 1236 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1237 subcode, msg_data, sizeof(msg_data), 1238 true, NULL, 0); 1239 if (!status) 1240 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1241 1242 return status; 1243 } 1244 1245 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1246 const unsigned char *addr) 1247 { 1248 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1249 1250 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1251 HCLGE_MBX_MAC_VLAN_UC_ADD, 1252 addr, ETH_ALEN, false, NULL, 0); 1253 } 1254 1255 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1256 const unsigned char *addr) 1257 { 1258 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1259 1260 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1261 HCLGE_MBX_MAC_VLAN_UC_REMOVE, 1262 addr, ETH_ALEN, false, NULL, 0); 1263 } 1264 1265 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1266 const unsigned char *addr) 1267 { 1268 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1269 1270 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1271 HCLGE_MBX_MAC_VLAN_MC_ADD, 1272 addr, ETH_ALEN, false, NULL, 0); 1273 } 1274 1275 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1276 const unsigned char *addr) 1277 { 1278 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1279 1280 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1281 HCLGE_MBX_MAC_VLAN_MC_REMOVE, 1282 addr, ETH_ALEN, false, NULL, 0); 1283 } 1284 1285 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1286 __be16 proto, u16 vlan_id, 1287 bool is_kill) 1288 { 1289 #define HCLGEVF_VLAN_MBX_MSG_LEN 5 1290 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1291 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; 1292 int ret; 1293 1294 if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1295 return -EINVAL; 1296 1297 if (proto != htons(ETH_P_8021Q)) 1298 return -EPROTONOSUPPORT; 1299 1300 /* When device is resetting, firmware is unable to handle 1301 * mailbox. Just record the vlan id, and remove it after 1302 * reset finished. 1303 */ 1304 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) { 1305 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1306 return -EBUSY; 1307 } 1308 1309 msg_data[0] = is_kill; 1310 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 1311 memcpy(&msg_data[3], &proto, sizeof(proto)); 1312 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1313 HCLGE_MBX_VLAN_FILTER, msg_data, 1314 HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0); 1315 1316 /* when remove hw vlan filter failed, record the vlan id, 1317 * and try to remove it from hw later, to be consistence 1318 * with stack. 
1319 */ 1320 if (is_kill && ret) 1321 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1322 1323 return ret; 1324 } 1325 1326 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1327 { 1328 #define HCLGEVF_MAX_SYNC_COUNT 60 1329 struct hnae3_handle *handle = &hdev->nic; 1330 int ret, sync_cnt = 0; 1331 u16 vlan_id; 1332 1333 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1334 while (vlan_id != VLAN_N_VID) { 1335 ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1336 vlan_id, true); 1337 if (ret) 1338 return; 1339 1340 clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1341 sync_cnt++; 1342 if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1343 return; 1344 1345 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1346 } 1347 } 1348 1349 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1350 { 1351 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1352 u8 msg_data; 1353 1354 msg_data = enable ? 1 : 0; 1355 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1356 HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, 1357 1, false, NULL, 0); 1358 } 1359 1360 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1361 { 1362 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1363 u8 msg_data[2]; 1364 int ret; 1365 1366 memcpy(msg_data, &queue_id, sizeof(queue_id)); 1367 1368 /* disable vf queue before send queue reset msg to PF */ 1369 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 1370 if (ret) 1371 return ret; 1372 1373 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 1374 sizeof(msg_data), true, NULL, 0); 1375 } 1376 1377 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1378 { 1379 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1380 1381 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu, 1382 sizeof(new_mtu), true, NULL, 0); 1383 } 1384 1385 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1386 enum hnae3_reset_notify_type type) 1387 { 1388 struct hnae3_client *client = hdev->nic_client; 1389 struct hnae3_handle *handle = &hdev->nic; 1390 int ret; 1391 1392 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 1393 !client) 1394 return 0; 1395 1396 if (!client->ops->reset_notify) 1397 return -EOPNOTSUPP; 1398 1399 ret = client->ops->reset_notify(handle, type); 1400 if (ret) 1401 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1402 type, ret); 1403 1404 return ret; 1405 } 1406 1407 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 1408 { 1409 struct hclgevf_dev *hdev = ae_dev->priv; 1410 1411 set_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1412 } 1413 1414 static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev, 1415 unsigned long delay_us, 1416 unsigned long wait_cnt) 1417 { 1418 unsigned long cnt = 0; 1419 1420 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && 1421 cnt++ < wait_cnt) 1422 usleep_range(delay_us, delay_us * 2); 1423 1424 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { 1425 dev_err(&hdev->pdev->dev, 1426 "flr wait timeout\n"); 1427 return -ETIMEDOUT; 1428 } 1429 1430 return 0; 1431 } 1432 1433 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1434 { 1435 #define HCLGEVF_RESET_WAIT_US 20000 1436 #define HCLGEVF_RESET_WAIT_CNT 2000 1437 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1438 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1439 1440 u32 val; 1441 int ret; 1442 1443 if (hdev->reset_type == HNAE3_FLR_RESET) 1444 return hclgevf_flr_poll_timeout(hdev, 1445 HCLGEVF_RESET_WAIT_US, 
1446 HCLGEVF_RESET_WAIT_CNT); 1447 else if (hdev->reset_type == HNAE3_VF_RESET) 1448 ret = readl_poll_timeout(hdev->hw.io_base + 1449 HCLGEVF_VF_RST_ING, val, 1450 !(val & HCLGEVF_VF_RST_ING_BIT), 1451 HCLGEVF_RESET_WAIT_US, 1452 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1453 else 1454 ret = readl_poll_timeout(hdev->hw.io_base + 1455 HCLGEVF_RST_ING, val, 1456 !(val & HCLGEVF_RST_ING_BITS), 1457 HCLGEVF_RESET_WAIT_US, 1458 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1459 1460 /* hardware completion status should be available by this time */ 1461 if (ret) { 1462 dev_err(&hdev->pdev->dev, 1463 "could'nt get reset done status from h/w, timeout!\n"); 1464 return ret; 1465 } 1466 1467 /* we will wait a bit more to let reset of the stack to complete. This 1468 * might happen in case reset assertion was made by PF. Yes, this also 1469 * means we might end up waiting bit more even for VF reset. 1470 */ 1471 msleep(5000); 1472 1473 return 0; 1474 } 1475 1476 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 1477 { 1478 u32 reg_val; 1479 1480 reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); 1481 if (enable) 1482 reg_val |= HCLGEVF_NIC_SW_RST_RDY; 1483 else 1484 reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 1485 1486 hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 1487 reg_val); 1488 } 1489 1490 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1491 { 1492 int ret; 1493 1494 /* uninitialize the nic client */ 1495 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1496 if (ret) 1497 return ret; 1498 1499 /* re-initialize the hclge device */ 1500 ret = hclgevf_reset_hdev(hdev); 1501 if (ret) { 1502 dev_err(&hdev->pdev->dev, 1503 "hclge device re-init failed, VF is disabled!\n"); 1504 return ret; 1505 } 1506 1507 /* bring up the nic client again */ 1508 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1509 if (ret) 1510 return ret; 1511 1512 ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); 1513 if (ret) 1514 return ret; 1515 1516 /* clear handshake status with IMP */ 1517 hclgevf_reset_handshake(hdev, false); 1518 1519 return 0; 1520 } 1521 1522 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1523 { 1524 #define HCLGEVF_RESET_SYNC_TIME 100 1525 1526 int ret = 0; 1527 1528 switch (hdev->reset_type) { 1529 case HNAE3_VF_FUNC_RESET: 1530 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 1531 0, true, NULL, sizeof(u8)); 1532 hdev->rst_stats.vf_func_rst_cnt++; 1533 break; 1534 case HNAE3_FLR_RESET: 1535 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1536 hdev->rst_stats.flr_rst_cnt++; 1537 break; 1538 default: 1539 break; 1540 } 1541 1542 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1543 /* inform hardware that preparatory work is done */ 1544 msleep(HCLGEVF_RESET_SYNC_TIME); 1545 hclgevf_reset_handshake(hdev, true); 1546 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", 1547 hdev->reset_type, ret); 1548 1549 return ret; 1550 } 1551 1552 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 1553 { 1554 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 1555 hdev->rst_stats.vf_func_rst_cnt); 1556 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1557 hdev->rst_stats.flr_rst_cnt); 1558 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 1559 hdev->rst_stats.vf_rst_cnt); 1560 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1561 hdev->rst_stats.rst_done_cnt); 1562 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1563 hdev->rst_stats.hw_rst_done_cnt); 1564 dev_info(&hdev->pdev->dev, "reset count: 
%u\n", 1565 hdev->rst_stats.rst_cnt); 1566 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1567 hdev->rst_stats.rst_fail_cnt); 1568 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1569 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 1570 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 1571 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG)); 1572 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1573 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); 1574 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1575 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 1576 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1577 } 1578 1579 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1580 { 1581 /* recover handshake status with IMP when reset fail */ 1582 hclgevf_reset_handshake(hdev, true); 1583 hdev->rst_stats.rst_fail_cnt++; 1584 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1585 hdev->rst_stats.rst_fail_cnt); 1586 1587 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1588 set_bit(hdev->reset_type, &hdev->reset_pending); 1589 1590 if (hclgevf_is_reset_pending(hdev)) { 1591 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1592 hclgevf_reset_task_schedule(hdev); 1593 } else { 1594 hclgevf_dump_rst_info(hdev); 1595 } 1596 } 1597 1598 static int hclgevf_reset(struct hclgevf_dev *hdev) 1599 { 1600 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1601 int ret; 1602 1603 /* Initialize ae_dev reset status as well, in case enet layer wants to 1604 * know if device is undergoing reset 1605 */ 1606 ae_dev->reset_type = hdev->reset_type; 1607 hdev->rst_stats.rst_cnt++; 1608 rtnl_lock(); 1609 1610 /* bring down the nic to stop any ongoing TX/RX */ 1611 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1612 if (ret) 1613 goto err_reset_lock; 1614 1615 rtnl_unlock(); 1616 1617 ret = hclgevf_reset_prepare_wait(hdev); 1618 if (ret) 1619 goto err_reset; 1620 1621 /* check if VF could successfully fetch the hardware reset completion 1622 * status from the hardware 1623 */ 1624 ret = hclgevf_reset_wait(hdev); 1625 if (ret) { 1626 /* can't do much in this situation, will disable VF */ 1627 dev_err(&hdev->pdev->dev, 1628 "VF failed(=%d) to fetch H/W reset completion status\n", 1629 ret); 1630 goto err_reset; 1631 } 1632 1633 hdev->rst_stats.hw_rst_done_cnt++; 1634 1635 rtnl_lock(); 1636 1637 /* now, re-initialize the nic client and ae device */ 1638 ret = hclgevf_reset_stack(hdev); 1639 if (ret) { 1640 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1641 goto err_reset_lock; 1642 } 1643 1644 /* bring up the nic to enable TX/RX again */ 1645 ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1646 if (ret) 1647 goto err_reset_lock; 1648 1649 rtnl_unlock(); 1650 1651 hdev->last_reset_time = jiffies; 1652 ae_dev->reset_type = HNAE3_NONE_RESET; 1653 hdev->rst_stats.rst_done_cnt++; 1654 hdev->rst_stats.rst_fail_cnt = 0; 1655 1656 return ret; 1657 err_reset_lock: 1658 rtnl_unlock(); 1659 err_reset: 1660 hclgevf_reset_err_handle(hdev); 1661 1662 return ret; 1663 } 1664 1665 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1666 unsigned long *addr) 1667 { 1668 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1669 1670 /* return the highest priority reset level amongst all */ 1671 if (test_bit(HNAE3_VF_RESET, addr)) { 1672 rst_level = HNAE3_VF_RESET; 1673 clear_bit(HNAE3_VF_RESET, addr); 1674 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1675 
clear_bit(HNAE3_VF_FUNC_RESET, addr); 1676 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1677 rst_level = HNAE3_VF_FULL_RESET; 1678 clear_bit(HNAE3_VF_FULL_RESET, addr); 1679 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1680 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1681 rst_level = HNAE3_VF_PF_FUNC_RESET; 1682 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1683 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1684 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1685 rst_level = HNAE3_VF_FUNC_RESET; 1686 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1687 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 1688 rst_level = HNAE3_FLR_RESET; 1689 clear_bit(HNAE3_FLR_RESET, addr); 1690 } 1691 1692 return rst_level; 1693 } 1694 1695 static void hclgevf_reset_event(struct pci_dev *pdev, 1696 struct hnae3_handle *handle) 1697 { 1698 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1699 struct hclgevf_dev *hdev = ae_dev->priv; 1700 1701 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 1702 1703 if (hdev->default_reset_request) 1704 hdev->reset_level = 1705 hclgevf_get_reset_level(hdev, 1706 &hdev->default_reset_request); 1707 else 1708 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1709 1710 /* reset of this VF requested */ 1711 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1712 hclgevf_reset_task_schedule(hdev); 1713 1714 hdev->last_reset_time = jiffies; 1715 } 1716 1717 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1718 enum hnae3_reset_type rst_type) 1719 { 1720 struct hclgevf_dev *hdev = ae_dev->priv; 1721 1722 set_bit(rst_type, &hdev->default_reset_request); 1723 } 1724 1725 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 1726 { 1727 #define HCLGEVF_FLR_WAIT_MS 100 1728 #define HCLGEVF_FLR_WAIT_CNT 50 1729 struct hclgevf_dev *hdev = ae_dev->priv; 1730 int cnt = 0; 1731 1732 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1733 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1734 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); 1735 hclgevf_reset_event(hdev->pdev, NULL); 1736 1737 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && 1738 cnt++ < HCLGEVF_FLR_WAIT_CNT) 1739 msleep(HCLGEVF_FLR_WAIT_MS); 1740 1741 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) 1742 dev_err(&hdev->pdev->dev, 1743 "flr wait down timeout: %d\n", cnt); 1744 } 1745 1746 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1747 { 1748 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1749 1750 return hdev->fw_version; 1751 } 1752 1753 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1754 { 1755 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1756 1757 vector->vector_irq = pci_irq_vector(hdev->pdev, 1758 HCLGEVF_MISC_VECTOR_NUM); 1759 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1760 /* vector status always valid for Vector 0 */ 1761 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1762 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1763 1764 hdev->num_msi_left -= 1; 1765 hdev->num_msi_used += 1; 1766 } 1767 1768 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1769 { 1770 if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && 1771 !test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)) { 1772 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1773 schedule_work(&hdev->rst_service_task); 1774 } 1775 } 1776 1777 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1778 { 1779 if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && 1780 !test_bit(HCLGEVF_STATE_MBX_HANDLING, 
&hdev->state)) { 1781 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1782 schedule_work(&hdev->mbx_service_task); 1783 } 1784 } 1785 1786 static void hclgevf_task_schedule(struct hclgevf_dev *hdev) 1787 { 1788 if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && 1789 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) 1790 schedule_work(&hdev->service_task); 1791 } 1792 1793 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) 1794 { 1795 /* if we have any pending mailbox event then schedule the mbx task */ 1796 if (hdev->mbx_event_pending) 1797 hclgevf_mbx_task_schedule(hdev); 1798 1799 if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) 1800 hclgevf_reset_task_schedule(hdev); 1801 } 1802 1803 static void hclgevf_service_timer(struct timer_list *t) 1804 { 1805 struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); 1806 1807 mod_timer(&hdev->service_timer, jiffies + 1808 HCLGEVF_GENERAL_TASK_INTERVAL * HZ); 1809 1810 hdev->stats_timer++; 1811 hclgevf_task_schedule(hdev); 1812 } 1813 1814 static void hclgevf_reset_service_task(struct work_struct *work) 1815 { 1816 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 1817 1818 struct hclgevf_dev *hdev = 1819 container_of(work, struct hclgevf_dev, rst_service_task); 1820 int ret; 1821 1822 if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1823 return; 1824 1825 clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1826 1827 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 1828 &hdev->reset_state)) { 1829 /* PF has initmated that it is about to reset the hardware. 1830 * We now have to poll & check if hardware has actually 1831 * completed the reset sequence. On hardware reset completion, 1832 * VF needs to reset the client and ae device. 1833 */ 1834 hdev->reset_attempts = 0; 1835 1836 hdev->last_reset_time = jiffies; 1837 while ((hdev->reset_type = 1838 hclgevf_get_reset_level(hdev, &hdev->reset_pending)) 1839 != HNAE3_NONE_RESET) { 1840 ret = hclgevf_reset(hdev); 1841 if (ret) 1842 dev_err(&hdev->pdev->dev, 1843 "VF stack reset failed %d.\n", ret); 1844 } 1845 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1846 &hdev->reset_state)) { 1847 /* we could be here when either of below happens: 1848 * 1. reset was initiated due to watchdog timeout caused by 1849 * a. IMP was earlier reset and our TX got choked down and 1850 * which resulted in watchdog reacting and inducing VF 1851 * reset. This also means our cmdq would be unreliable. 1852 * b. problem in TX due to other lower layer(example link 1853 * layer not functioning properly etc.) 1854 * 2. VF reset might have been initiated due to some config 1855 * change. 1856 * 1857 * NOTE: Theres no clear way to detect above cases than to react 1858 * to the response of PF for this reset request. PF will ack the 1859 * 1b and 2. cases but we will not get any intimation about 1a 1860 * from PF as cmdq would be in unreliable state i.e. mailbox 1861 * communication between PF and VF would be broken. 1862 * 1863 * if we are never geting into pending state it means either: 1864 * 1. PF is not receiving our request which could be due to IMP 1865 * reset 1866 * 2. PF is screwed 1867 * We cannot do much for 2. but to check first we can try reset 1868 * our PCIe + stack and see if it alleviates the problem. 
1869 */ 1870 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 1871 /* prepare for full reset of stack + pcie interface */ 1872 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 1873 1874 /* "defer" schedule the reset task again */ 1875 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1876 } else { 1877 hdev->reset_attempts++; 1878 1879 set_bit(hdev->reset_level, &hdev->reset_pending); 1880 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1881 } 1882 hclgevf_reset_task_schedule(hdev); 1883 } 1884 1885 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1886 } 1887 1888 static void hclgevf_mailbox_service_task(struct work_struct *work) 1889 { 1890 struct hclgevf_dev *hdev; 1891 1892 hdev = container_of(work, struct hclgevf_dev, mbx_service_task); 1893 1894 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1895 return; 1896 1897 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1898 1899 hclgevf_mbx_async_handler(hdev); 1900 1901 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1902 } 1903 1904 static void hclgevf_keep_alive_timer(struct timer_list *t) 1905 { 1906 struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer); 1907 1908 schedule_work(&hdev->keep_alive_task); 1909 mod_timer(&hdev->keep_alive_timer, jiffies + 1910 HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ); 1911 } 1912 1913 static void hclgevf_keep_alive_task(struct work_struct *work) 1914 { 1915 struct hclgevf_dev *hdev; 1916 u8 respmsg; 1917 int ret; 1918 1919 hdev = container_of(work, struct hclgevf_dev, keep_alive_task); 1920 1921 if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) 1922 return; 1923 1924 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, 1925 0, false, &respmsg, sizeof(respmsg)); 1926 if (ret) 1927 dev_err(&hdev->pdev->dev, 1928 "VF sends keep alive cmd failed(=%d)\n", ret); 1929 } 1930 1931 static void hclgevf_service_task(struct work_struct *work) 1932 { 1933 struct hnae3_handle *handle; 1934 struct hclgevf_dev *hdev; 1935 1936 hdev = container_of(work, struct hclgevf_dev, service_task); 1937 handle = &hdev->nic; 1938 1939 if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) { 1940 hclgevf_tqps_update_stats(handle); 1941 hdev->stats_timer = 0; 1942 } 1943 1944 /* request the link status from the PF. 
The PF may be able to push such
1945 	 * updates to the VF in the future, so we might remove this later.
1946 	 */
1947 	hclgevf_request_link_info(hdev);
1948
1949 	hclgevf_update_link_mode(hdev);
1950
1951 	hclgevf_sync_vlan_filter(hdev);
1952
1953 	hclgevf_deferred_task_schedule(hdev);
1954
1955 	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1956 }
1957
1958 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
1959 {
1960 	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
1961 }
1962
1963 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
1964 						      u32 *clearval)
1965 {
1966 	u32 val, cmdq_stat_reg, rst_ing_reg;
1967
1968 	/* fetch the events from their corresponding regs */
1969 	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
1970 					 HCLGEVF_VECTOR0_CMDQ_STAT_REG);
1971
1972 	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
1973 		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
1974 		dev_info(&hdev->pdev->dev,
1975 			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
1976 		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
1977 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1978 		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
1979 		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
1980 		hdev->rst_stats.vf_rst_cnt++;
1981 		/* Set the VF hardware reset status; the PF will clear
1982 		 * this status once it has finished initializing.
1983 		 */
1984 		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
1985 		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
1986 				  val | HCLGEVF_VF_RST_ING_BIT);
1987 		return HCLGEVF_VECTOR0_EVENT_RST;
1988 	}
1989
1990 	/* check for vector0 mailbox(=CMDQ RX) event source */
1991 	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
1992 		/* For revision 0x21, writing 0 to a bit of the clear register
1993 		 * clears the corresponding interrupt, while writing 1 keeps
1994 		 * the old value.
1995 		 * For revision 0x20, the clear register is a read & write
1996 		 * register, so we should just write 0 to the bit we are
1997 		 * handling, and keep the other bits as in cmdq_stat_reg.
1998 		 */
1999 		if (hdev->pdev->revision >= 0x21)
2000 			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2001 		else
2002 			*clearval = cmdq_stat_reg &
2003 				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2004
2005 		return HCLGEVF_VECTOR0_EVENT_MBX;
2006 	}
2007
2008 	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
2009
2010 	return HCLGEVF_VECTOR0_EVENT_OTHER;
2011 }
2012
2013 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
2014 {
2015 	writel(en ?
1 : 0, vector->addr); 2016 } 2017 2018 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2019 { 2020 enum hclgevf_evt_cause event_cause; 2021 struct hclgevf_dev *hdev = data; 2022 u32 clearval; 2023 2024 hclgevf_enable_vector(&hdev->misc_vector, false); 2025 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2026 2027 switch (event_cause) { 2028 case HCLGEVF_VECTOR0_EVENT_RST: 2029 hclgevf_reset_task_schedule(hdev); 2030 break; 2031 case HCLGEVF_VECTOR0_EVENT_MBX: 2032 hclgevf_mbx_handler(hdev); 2033 break; 2034 default: 2035 break; 2036 } 2037 2038 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 2039 hclgevf_clear_event_cause(hdev, clearval); 2040 hclgevf_enable_vector(&hdev->misc_vector, true); 2041 } 2042 2043 return IRQ_HANDLED; 2044 } 2045 2046 static int hclgevf_configure(struct hclgevf_dev *hdev) 2047 { 2048 int ret; 2049 2050 /* get current port based vlan state from PF */ 2051 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2052 if (ret) 2053 return ret; 2054 2055 /* get queue configuration from PF */ 2056 ret = hclgevf_get_queue_info(hdev); 2057 if (ret) 2058 return ret; 2059 2060 /* get queue depth info from PF */ 2061 ret = hclgevf_get_queue_depth(hdev); 2062 if (ret) 2063 return ret; 2064 2065 ret = hclgevf_get_pf_media_type(hdev); 2066 if (ret) 2067 return ret; 2068 2069 /* get tc configuration from PF */ 2070 return hclgevf_get_tc_info(hdev); 2071 } 2072 2073 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2074 { 2075 struct pci_dev *pdev = ae_dev->pdev; 2076 struct hclgevf_dev *hdev; 2077 2078 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2079 if (!hdev) 2080 return -ENOMEM; 2081 2082 hdev->pdev = pdev; 2083 hdev->ae_dev = ae_dev; 2084 ae_dev->priv = hdev; 2085 2086 return 0; 2087 } 2088 2089 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2090 { 2091 struct hnae3_handle *roce = &hdev->roce; 2092 struct hnae3_handle *nic = &hdev->nic; 2093 2094 roce->rinfo.num_vectors = hdev->num_roce_msix; 2095 2096 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2097 hdev->num_msi_left == 0) 2098 return -EINVAL; 2099 2100 roce->rinfo.base_vector = hdev->roce_base_vector; 2101 2102 roce->rinfo.netdev = nic->kinfo.netdev; 2103 roce->rinfo.roce_io_base = hdev->hw.io_base; 2104 2105 roce->pdev = nic->pdev; 2106 roce->ae_algo = nic->ae_algo; 2107 roce->numa_node_mask = nic->numa_node_mask; 2108 2109 return 0; 2110 } 2111 2112 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 2113 { 2114 struct hclgevf_cfg_gro_status_cmd *req; 2115 struct hclgevf_desc desc; 2116 int ret; 2117 2118 if (!hnae3_dev_gro_supported(hdev)) 2119 return 0; 2120 2121 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2122 false); 2123 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2124 2125 req->gro_en = cpu_to_le16(en ? 
1 : 0); 2126 2127 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2128 if (ret) 2129 dev_err(&hdev->pdev->dev, 2130 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2131 2132 return ret; 2133 } 2134 2135 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2136 { 2137 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2138 int ret; 2139 u32 i; 2140 2141 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2142 2143 if (hdev->pdev->revision >= 0x21) { 2144 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2145 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2146 HCLGEVF_RSS_KEY_SIZE); 2147 2148 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 2149 rss_cfg->rss_hash_key); 2150 if (ret) 2151 return ret; 2152 2153 rss_cfg->rss_tuple_sets.ipv4_tcp_en = 2154 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2155 rss_cfg->rss_tuple_sets.ipv4_udp_en = 2156 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2157 rss_cfg->rss_tuple_sets.ipv4_sctp_en = 2158 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2159 rss_cfg->rss_tuple_sets.ipv4_fragment_en = 2160 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2161 rss_cfg->rss_tuple_sets.ipv6_tcp_en = 2162 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2163 rss_cfg->rss_tuple_sets.ipv6_udp_en = 2164 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2165 rss_cfg->rss_tuple_sets.ipv6_sctp_en = 2166 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2167 rss_cfg->rss_tuple_sets.ipv6_fragment_en = 2168 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2169 2170 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2171 if (ret) 2172 return ret; 2173 } 2174 2175 /* Initialize RSS indirect table */ 2176 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 2177 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2178 2179 ret = hclgevf_set_rss_indir_table(hdev); 2180 if (ret) 2181 return ret; 2182 2183 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2184 } 2185 2186 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2187 { 2188 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2189 false); 2190 } 2191 2192 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2193 { 2194 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2195 2196 if (enable) { 2197 mod_timer(&hdev->service_timer, jiffies + HZ); 2198 } else { 2199 del_timer_sync(&hdev->service_timer); 2200 cancel_work_sync(&hdev->service_task); 2201 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 2202 } 2203 } 2204 2205 static int hclgevf_ae_start(struct hnae3_handle *handle) 2206 { 2207 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2208 2209 hclgevf_reset_tqp_stats(handle); 2210 2211 hclgevf_request_link_info(hdev); 2212 2213 hclgevf_update_link_mode(hdev); 2214 2215 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2216 2217 return 0; 2218 } 2219 2220 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2221 { 2222 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2223 int i; 2224 2225 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2226 2227 if (hdev->reset_type != HNAE3_VF_RESET) 2228 for (i = 0; i < handle->kinfo.num_tqps; i++) 2229 if (hclgevf_reset_tqp(handle, i)) 2230 break; 2231 2232 hclgevf_reset_tqp_stats(handle); 2233 hclgevf_update_link_status(hdev, 0); 2234 } 2235 2236 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2237 { 2238 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2239 u8 msg_data; 2240 2241 msg_data = alive ? 
1 : 0; 2242 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE, 2243 0, &msg_data, 1, false, NULL, 0); 2244 } 2245 2246 static int hclgevf_client_start(struct hnae3_handle *handle) 2247 { 2248 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2249 int ret; 2250 2251 ret = hclgevf_set_alive(handle, true); 2252 if (ret) 2253 return ret; 2254 2255 mod_timer(&hdev->keep_alive_timer, jiffies + 2256 HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ); 2257 2258 return 0; 2259 } 2260 2261 static void hclgevf_client_stop(struct hnae3_handle *handle) 2262 { 2263 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2264 int ret; 2265 2266 ret = hclgevf_set_alive(handle, false); 2267 if (ret) 2268 dev_warn(&hdev->pdev->dev, 2269 "%s failed %d\n", __func__, ret); 2270 2271 del_timer_sync(&hdev->keep_alive_timer); 2272 cancel_work_sync(&hdev->keep_alive_task); 2273 } 2274 2275 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2276 { 2277 /* setup tasks for the MBX */ 2278 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); 2279 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2280 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2281 2282 /* setup tasks for service timer */ 2283 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); 2284 2285 INIT_WORK(&hdev->service_task, hclgevf_service_task); 2286 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 2287 2288 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); 2289 2290 mutex_init(&hdev->mbx_resp.mbx_mutex); 2291 2292 /* bring the device down */ 2293 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2294 } 2295 2296 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2297 { 2298 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2299 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2300 2301 if (hdev->keep_alive_timer.function) 2302 del_timer_sync(&hdev->keep_alive_timer); 2303 if (hdev->keep_alive_task.func) 2304 cancel_work_sync(&hdev->keep_alive_task); 2305 if (hdev->service_timer.function) 2306 del_timer_sync(&hdev->service_timer); 2307 if (hdev->service_task.func) 2308 cancel_work_sync(&hdev->service_task); 2309 if (hdev->mbx_service_task.func) 2310 cancel_work_sync(&hdev->mbx_service_task); 2311 if (hdev->rst_service_task.func) 2312 cancel_work_sync(&hdev->rst_service_task); 2313 2314 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2315 } 2316 2317 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2318 { 2319 struct pci_dev *pdev = hdev->pdev; 2320 int vectors; 2321 int i; 2322 2323 if (hnae3_dev_roce_supported(hdev)) 2324 vectors = pci_alloc_irq_vectors(pdev, 2325 hdev->roce_base_msix_offset + 1, 2326 hdev->num_msi, 2327 PCI_IRQ_MSIX); 2328 else 2329 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2330 hdev->num_msi, 2331 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2332 2333 if (vectors < 0) { 2334 dev_err(&pdev->dev, 2335 "failed(%d) to allocate MSI/MSI-X vectors\n", 2336 vectors); 2337 return vectors; 2338 } 2339 if (vectors < hdev->num_msi) 2340 dev_warn(&hdev->pdev->dev, 2341 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2342 hdev->num_msi, vectors); 2343 2344 hdev->num_msi = vectors; 2345 hdev->num_msi_left = vectors; 2346 2347 hdev->base_msi_vector = pdev->irq; 2348 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2349 2350 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2351 sizeof(u16), GFP_KERNEL); 2352 if (!hdev->vector_status) { 2353 pci_free_irq_vectors(pdev); 2354 return -ENOMEM; 2355 } 2356 2357 for (i = 0; i < hdev->num_msi; i++) 2358 
hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2359 2360 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2361 sizeof(int), GFP_KERNEL); 2362 if (!hdev->vector_irq) { 2363 devm_kfree(&pdev->dev, hdev->vector_status); 2364 pci_free_irq_vectors(pdev); 2365 return -ENOMEM; 2366 } 2367 2368 return 0; 2369 } 2370 2371 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2372 { 2373 struct pci_dev *pdev = hdev->pdev; 2374 2375 devm_kfree(&pdev->dev, hdev->vector_status); 2376 devm_kfree(&pdev->dev, hdev->vector_irq); 2377 pci_free_irq_vectors(pdev); 2378 } 2379 2380 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2381 { 2382 int ret; 2383 2384 hclgevf_get_misc_vector(hdev); 2385 2386 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2387 0, "hclgevf_cmd", hdev); 2388 if (ret) { 2389 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2390 hdev->misc_vector.vector_irq); 2391 return ret; 2392 } 2393 2394 hclgevf_clear_event_cause(hdev, 0); 2395 2396 /* enable misc. vector(vector 0) */ 2397 hclgevf_enable_vector(&hdev->misc_vector, true); 2398 2399 return ret; 2400 } 2401 2402 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2403 { 2404 /* disable misc vector(vector 0) */ 2405 hclgevf_enable_vector(&hdev->misc_vector, false); 2406 synchronize_irq(hdev->misc_vector.vector_irq); 2407 free_irq(hdev->misc_vector.vector_irq, hdev); 2408 hclgevf_free_vector(hdev, 0); 2409 } 2410 2411 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2412 { 2413 struct device *dev = &hdev->pdev->dev; 2414 2415 dev_info(dev, "VF info begin:\n"); 2416 2417 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2418 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2419 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2420 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2421 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2422 dev_info(dev, "PF media type of this VF: %u\n", 2423 hdev->hw.mac.media_type); 2424 2425 dev_info(dev, "VF info end.\n"); 2426 } 2427 2428 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2429 struct hnae3_client *client) 2430 { 2431 struct hclgevf_dev *hdev = ae_dev->priv; 2432 int ret; 2433 2434 ret = client->ops->init_instance(&hdev->nic); 2435 if (ret) 2436 return ret; 2437 2438 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2439 hnae3_set_client_init_flag(client, ae_dev, 1); 2440 2441 if (netif_msg_drv(&hdev->nic)) 2442 hclgevf_info_show(hdev); 2443 2444 return 0; 2445 } 2446 2447 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2448 struct hnae3_client *client) 2449 { 2450 struct hclgevf_dev *hdev = ae_dev->priv; 2451 int ret; 2452 2453 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2454 !hdev->nic_client) 2455 return 0; 2456 2457 ret = hclgevf_init_roce_base_info(hdev); 2458 if (ret) 2459 return ret; 2460 2461 ret = client->ops->init_instance(&hdev->roce); 2462 if (ret) 2463 return ret; 2464 2465 hnae3_set_client_init_flag(client, ae_dev, 1); 2466 2467 return 0; 2468 } 2469 2470 static int hclgevf_init_client_instance(struct hnae3_client *client, 2471 struct hnae3_ae_dev *ae_dev) 2472 { 2473 struct hclgevf_dev *hdev = ae_dev->priv; 2474 int ret; 2475 2476 switch (client->type) { 2477 case HNAE3_CLIENT_KNIC: 2478 hdev->nic_client = client; 2479 hdev->nic.client = client; 2480 2481 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2482 if (ret) 2483 goto clear_nic; 2484 2485 ret = 
hclgevf_init_roce_client_instance(ae_dev, 2486 hdev->roce_client); 2487 if (ret) 2488 goto clear_roce; 2489 2490 break; 2491 case HNAE3_CLIENT_ROCE: 2492 if (hnae3_dev_roce_supported(hdev)) { 2493 hdev->roce_client = client; 2494 hdev->roce.client = client; 2495 } 2496 2497 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2498 if (ret) 2499 goto clear_roce; 2500 2501 break; 2502 default: 2503 return -EINVAL; 2504 } 2505 2506 return 0; 2507 2508 clear_nic: 2509 hdev->nic_client = NULL; 2510 hdev->nic.client = NULL; 2511 return ret; 2512 clear_roce: 2513 hdev->roce_client = NULL; 2514 hdev->roce.client = NULL; 2515 return ret; 2516 } 2517 2518 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2519 struct hnae3_ae_dev *ae_dev) 2520 { 2521 struct hclgevf_dev *hdev = ae_dev->priv; 2522 2523 /* un-init roce, if it exists */ 2524 if (hdev->roce_client) { 2525 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2526 hdev->roce_client = NULL; 2527 hdev->roce.client = NULL; 2528 } 2529 2530 /* un-init nic/unic, if this was not called by roce client */ 2531 if (client->ops->uninit_instance && hdev->nic_client && 2532 client->type != HNAE3_CLIENT_ROCE) { 2533 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2534 2535 client->ops->uninit_instance(&hdev->nic, 0); 2536 hdev->nic_client = NULL; 2537 hdev->nic.client = NULL; 2538 } 2539 } 2540 2541 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2542 { 2543 struct pci_dev *pdev = hdev->pdev; 2544 struct hclgevf_hw *hw; 2545 int ret; 2546 2547 ret = pci_enable_device(pdev); 2548 if (ret) { 2549 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2550 return ret; 2551 } 2552 2553 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2554 if (ret) { 2555 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2556 goto err_disable_device; 2557 } 2558 2559 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2560 if (ret) { 2561 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2562 goto err_disable_device; 2563 } 2564 2565 pci_set_master(pdev); 2566 hw = &hdev->hw; 2567 hw->hdev = hdev; 2568 hw->io_base = pci_iomap(pdev, 2, 0); 2569 if (!hw->io_base) { 2570 dev_err(&pdev->dev, "can't map configuration register space\n"); 2571 ret = -ENOMEM; 2572 goto err_clr_master; 2573 } 2574 2575 return 0; 2576 2577 err_clr_master: 2578 pci_clear_master(pdev); 2579 pci_release_regions(pdev); 2580 err_disable_device: 2581 pci_disable_device(pdev); 2582 2583 return ret; 2584 } 2585 2586 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2587 { 2588 struct pci_dev *pdev = hdev->pdev; 2589 2590 pci_iounmap(pdev, hdev->hw.io_base); 2591 pci_clear_master(pdev); 2592 pci_release_regions(pdev); 2593 pci_disable_device(pdev); 2594 } 2595 2596 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2597 { 2598 struct hclgevf_query_res_cmd *req; 2599 struct hclgevf_desc desc; 2600 int ret; 2601 2602 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 2603 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2604 if (ret) { 2605 dev_err(&hdev->pdev->dev, 2606 "query vf resource failed, ret = %d.\n", ret); 2607 return ret; 2608 } 2609 2610 req = (struct hclgevf_query_res_cmd *)desc.data; 2611 2612 if (hnae3_dev_roce_supported(hdev)) { 2613 hdev->roce_base_msix_offset = 2614 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), 2615 HCLGEVF_MSIX_OFT_ROCEE_M, 2616 HCLGEVF_MSIX_OFT_ROCEE_S); 2617 hdev->num_roce_msix = 2618 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 
2619 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2620 2621 /* nic's msix numbers is always equals to the roce's. */ 2622 hdev->num_nic_msix = hdev->num_roce_msix; 2623 2624 /* VF should have NIC vectors and Roce vectors, NIC vectors 2625 * are queued before Roce vectors. The offset is fixed to 64. 2626 */ 2627 hdev->num_msi = hdev->num_roce_msix + 2628 hdev->roce_base_msix_offset; 2629 } else { 2630 hdev->num_msi = 2631 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2632 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2633 2634 hdev->num_nic_msix = hdev->num_msi; 2635 } 2636 2637 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 2638 dev_err(&hdev->pdev->dev, 2639 "Just %u msi resources, not enough for vf(min:2).\n", 2640 hdev->num_nic_msix); 2641 return -EINVAL; 2642 } 2643 2644 return 0; 2645 } 2646 2647 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2648 { 2649 struct pci_dev *pdev = hdev->pdev; 2650 int ret = 0; 2651 2652 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2653 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2654 hclgevf_misc_irq_uninit(hdev); 2655 hclgevf_uninit_msi(hdev); 2656 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2657 } 2658 2659 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2660 pci_set_master(pdev); 2661 ret = hclgevf_init_msi(hdev); 2662 if (ret) { 2663 dev_err(&pdev->dev, 2664 "failed(%d) to init MSI/MSI-X\n", ret); 2665 return ret; 2666 } 2667 2668 ret = hclgevf_misc_irq_init(hdev); 2669 if (ret) { 2670 hclgevf_uninit_msi(hdev); 2671 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2672 ret); 2673 return ret; 2674 } 2675 2676 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2677 } 2678 2679 return ret; 2680 } 2681 2682 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2683 { 2684 struct pci_dev *pdev = hdev->pdev; 2685 int ret; 2686 2687 ret = hclgevf_pci_reset(hdev); 2688 if (ret) { 2689 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2690 return ret; 2691 } 2692 2693 ret = hclgevf_cmd_init(hdev); 2694 if (ret) { 2695 dev_err(&pdev->dev, "cmd failed %d\n", ret); 2696 return ret; 2697 } 2698 2699 ret = hclgevf_rss_init_hw(hdev); 2700 if (ret) { 2701 dev_err(&hdev->pdev->dev, 2702 "failed(%d) to initialize RSS\n", ret); 2703 return ret; 2704 } 2705 2706 ret = hclgevf_config_gro(hdev, true); 2707 if (ret) 2708 return ret; 2709 2710 ret = hclgevf_init_vlan_config(hdev); 2711 if (ret) { 2712 dev_err(&hdev->pdev->dev, 2713 "failed(%d) to initialize VLAN config\n", ret); 2714 return ret; 2715 } 2716 2717 dev_info(&hdev->pdev->dev, "Reset done\n"); 2718 2719 return 0; 2720 } 2721 2722 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 2723 { 2724 struct pci_dev *pdev = hdev->pdev; 2725 int ret; 2726 2727 ret = hclgevf_pci_init(hdev); 2728 if (ret) { 2729 dev_err(&pdev->dev, "PCI initialization failed\n"); 2730 return ret; 2731 } 2732 2733 ret = hclgevf_cmd_queue_init(hdev); 2734 if (ret) { 2735 dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret); 2736 goto err_cmd_queue_init; 2737 } 2738 2739 ret = hclgevf_cmd_init(hdev); 2740 if (ret) 2741 goto err_cmd_init; 2742 2743 /* Get vf resource */ 2744 ret = hclgevf_query_vf_resource(hdev); 2745 if (ret) { 2746 dev_err(&hdev->pdev->dev, 2747 "Query vf status error, ret = %d.\n", ret); 2748 goto err_cmd_init; 2749 } 2750 2751 ret = hclgevf_init_msi(hdev); 2752 if (ret) { 2753 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 2754 goto err_cmd_init; 2755 } 2756 2757 hclgevf_state_init(hdev); 2758 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2759 2760 ret = 
hclgevf_misc_irq_init(hdev); 2761 if (ret) { 2762 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2763 ret); 2764 goto err_misc_irq_init; 2765 } 2766 2767 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2768 2769 ret = hclgevf_configure(hdev); 2770 if (ret) { 2771 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2772 goto err_config; 2773 } 2774 2775 ret = hclgevf_alloc_tqps(hdev); 2776 if (ret) { 2777 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2778 goto err_config; 2779 } 2780 2781 ret = hclgevf_set_handle_info(hdev); 2782 if (ret) { 2783 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); 2784 goto err_config; 2785 } 2786 2787 ret = hclgevf_config_gro(hdev, true); 2788 if (ret) 2789 goto err_config; 2790 2791 /* Initialize RSS for this VF */ 2792 ret = hclgevf_rss_init_hw(hdev); 2793 if (ret) { 2794 dev_err(&hdev->pdev->dev, 2795 "failed(%d) to initialize RSS\n", ret); 2796 goto err_config; 2797 } 2798 2799 ret = hclgevf_init_vlan_config(hdev); 2800 if (ret) { 2801 dev_err(&hdev->pdev->dev, 2802 "failed(%d) to initialize VLAN config\n", ret); 2803 goto err_config; 2804 } 2805 2806 hdev->last_reset_time = jiffies; 2807 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 2808 HCLGEVF_DRIVER_NAME); 2809 2810 return 0; 2811 2812 err_config: 2813 hclgevf_misc_irq_uninit(hdev); 2814 err_misc_irq_init: 2815 hclgevf_state_uninit(hdev); 2816 hclgevf_uninit_msi(hdev); 2817 err_cmd_init: 2818 hclgevf_cmd_uninit(hdev); 2819 err_cmd_queue_init: 2820 hclgevf_pci_uninit(hdev); 2821 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2822 return ret; 2823 } 2824 2825 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 2826 { 2827 hclgevf_state_uninit(hdev); 2828 2829 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2830 hclgevf_misc_irq_uninit(hdev); 2831 hclgevf_uninit_msi(hdev); 2832 } 2833 2834 hclgevf_pci_uninit(hdev); 2835 hclgevf_cmd_uninit(hdev); 2836 } 2837 2838 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 2839 { 2840 struct pci_dev *pdev = ae_dev->pdev; 2841 struct hclgevf_dev *hdev; 2842 int ret; 2843 2844 ret = hclgevf_alloc_hdev(ae_dev); 2845 if (ret) { 2846 dev_err(&pdev->dev, "hclge device allocation failed\n"); 2847 return ret; 2848 } 2849 2850 ret = hclgevf_init_hdev(ae_dev->priv); 2851 if (ret) { 2852 dev_err(&pdev->dev, "hclge device initialization failed\n"); 2853 return ret; 2854 } 2855 2856 hdev = ae_dev->priv; 2857 timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0); 2858 INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task); 2859 2860 return 0; 2861 } 2862 2863 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 2864 { 2865 struct hclgevf_dev *hdev = ae_dev->priv; 2866 2867 hclgevf_uninit_hdev(hdev); 2868 ae_dev->priv = NULL; 2869 } 2870 2871 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 2872 { 2873 struct hnae3_handle *nic = &hdev->nic; 2874 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 2875 2876 return min_t(u32, hdev->rss_size_max, 2877 hdev->num_tqps / kinfo->num_tc); 2878 } 2879 2880 /** 2881 * hclgevf_get_channels - Get the current channels enabled and max supported. 2882 * @handle: hardware information for network interface 2883 * @ch: ethtool channels structure 2884 * 2885 * We don't support separate tx and rx queues as channels. The other count 2886 * represents how many queues are being used for control. max_combined counts 2887 * how many queue pairs we can support. 
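 * For this VF, max_combined is min(rss_size_max, num_tqps / num_tc) queue
 * pairs, as computed by hclgevf_get_max_channels() above.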
They may not be mapped 1 to 1 with 2888 * q_vectors since we support a lot more queue pairs than q_vectors. 2889 **/ 2890 static void hclgevf_get_channels(struct hnae3_handle *handle, 2891 struct ethtool_channels *ch) 2892 { 2893 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2894 2895 ch->max_combined = hclgevf_get_max_channels(hdev); 2896 ch->other_count = 0; 2897 ch->max_other = 0; 2898 ch->combined_count = handle->kinfo.rss_size; 2899 } 2900 2901 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 2902 u16 *alloc_tqps, u16 *max_rss_size) 2903 { 2904 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2905 2906 *alloc_tqps = hdev->num_tqps; 2907 *max_rss_size = hdev->rss_size_max; 2908 } 2909 2910 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 2911 u32 new_tqps_num) 2912 { 2913 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 2914 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2915 u16 max_rss_size; 2916 2917 kinfo->req_rss_size = new_tqps_num; 2918 2919 max_rss_size = min_t(u16, hdev->rss_size_max, 2920 hdev->num_tqps / kinfo->num_tc); 2921 2922 /* Use the user's configuration when it is not larger than 2923 * max_rss_size, otherwise, use the maximum specification value. 2924 */ 2925 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 2926 kinfo->req_rss_size <= max_rss_size) 2927 kinfo->rss_size = kinfo->req_rss_size; 2928 else if (kinfo->rss_size > max_rss_size || 2929 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 2930 kinfo->rss_size = max_rss_size; 2931 2932 kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; 2933 } 2934 2935 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 2936 bool rxfh_configured) 2937 { 2938 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2939 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 2940 u16 cur_rss_size = kinfo->rss_size; 2941 u16 cur_tqps = kinfo->num_tqps; 2942 u32 *rss_indir; 2943 unsigned int i; 2944 int ret; 2945 2946 hclgevf_update_rss_size(handle, new_tqps_num); 2947 2948 ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 2949 if (ret) 2950 return ret; 2951 2952 /* RSS indirection table has been configuared by user */ 2953 if (rxfh_configured) 2954 goto out; 2955 2956 /* Reinitializes the rss indirect table according to the new RSS size */ 2957 rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 2958 if (!rss_indir) 2959 return -ENOMEM; 2960 2961 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 2962 rss_indir[i] = i % kinfo->rss_size; 2963 2964 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 2965 if (ret) 2966 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 2967 ret); 2968 2969 kfree(rss_indir); 2970 2971 out: 2972 if (!ret) 2973 dev_info(&hdev->pdev->dev, 2974 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 2975 cur_rss_size, kinfo->rss_size, 2976 cur_tqps, kinfo->rss_size * kinfo->num_tc); 2977 2978 return ret; 2979 } 2980 2981 static int hclgevf_get_status(struct hnae3_handle *handle) 2982 { 2983 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2984 2985 return hdev->hw.mac.link; 2986 } 2987 2988 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 2989 u8 *auto_neg, u32 *speed, 2990 u8 *duplex) 2991 { 2992 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2993 2994 if (speed) 2995 *speed = hdev->hw.mac.speed; 2996 if (duplex) 2997 *duplex = hdev->hw.mac.duplex; 2998 if (auto_neg) 2999 *auto_neg = AUTONEG_DISABLE; 
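	/* Autoneg is always reported as disabled for the VF; speed and
	 * duplex reflect the last values recorded via
	 * hclgevf_update_speed_duplex().
	 */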
3000 } 3001 3002 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3003 u8 duplex) 3004 { 3005 hdev->hw.mac.speed = speed; 3006 hdev->hw.mac.duplex = duplex; 3007 } 3008 3009 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3010 { 3011 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3012 3013 return hclgevf_config_gro(hdev, enable); 3014 } 3015 3016 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3017 u8 *module_type) 3018 { 3019 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3020 3021 if (media_type) 3022 *media_type = hdev->hw.mac.media_type; 3023 3024 if (module_type) 3025 *module_type = hdev->hw.mac.module_type; 3026 } 3027 3028 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3029 { 3030 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3031 3032 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3033 } 3034 3035 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3036 { 3037 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3038 3039 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3040 } 3041 3042 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3043 { 3044 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3045 3046 return hdev->rst_stats.hw_rst_done_cnt; 3047 } 3048 3049 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3050 unsigned long *supported, 3051 unsigned long *advertising) 3052 { 3053 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3054 3055 *supported = hdev->hw.mac.supported; 3056 *advertising = hdev->hw.mac.advertising; 3057 } 3058 3059 #define MAX_SEPARATE_NUM 4 3060 #define SEPARATOR_VALUE 0xFFFFFFFF 3061 #define REG_NUM_PER_LINE 4 3062 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 3063 3064 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 3065 { 3066 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 3067 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3068 3069 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 3070 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 3071 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 3072 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 3073 3074 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 3075 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 3076 } 3077 3078 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 3079 void *data) 3080 { 3081 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3082 int i, j, reg_um, separator_num; 3083 u32 *reg = data; 3084 3085 *version = hdev->fw_version; 3086 3087 /* fetching per-VF registers values from VF PCIe register space */ 3088 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 3089 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3090 for (i = 0; i < reg_um; i++) 3091 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 3092 for (i = 0; i < separator_num; i++) 3093 *reg++ = SEPARATOR_VALUE; 3094 3095 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 3096 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3097 for (i = 0; i < reg_um; i++) 3098 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 3099 for (i = 0; i < separator_num; i++) 3100 *reg++ = SEPARATOR_VALUE; 3101 3102 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 3103 separator_num = MAX_SEPARATE_NUM - reg_um % 
REG_NUM_PER_LINE; 3104 for (j = 0; j < hdev->num_tqps; j++) { 3105 for (i = 0; i < reg_um; i++) 3106 *reg++ = hclgevf_read_dev(&hdev->hw, 3107 ring_reg_addr_list[i] + 3108 0x200 * j); 3109 for (i = 0; i < separator_num; i++) 3110 *reg++ = SEPARATOR_VALUE; 3111 } 3112 3113 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 3114 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3115 for (j = 0; j < hdev->num_msi_used - 1; j++) { 3116 for (i = 0; i < reg_um; i++) 3117 *reg++ = hclgevf_read_dev(&hdev->hw, 3118 tqp_intr_reg_addr_list[i] + 3119 4 * j); 3120 for (i = 0; i < separator_num; i++) 3121 *reg++ = SEPARATOR_VALUE; 3122 } 3123 } 3124 3125 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 3126 u8 *port_base_vlan_info, u8 data_size) 3127 { 3128 struct hnae3_handle *nic = &hdev->nic; 3129 3130 rtnl_lock(); 3131 hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3132 rtnl_unlock(); 3133 3134 /* send msg to PF and wait update port based vlan info */ 3135 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 3136 HCLGE_MBX_PORT_BASE_VLAN_CFG, 3137 port_base_vlan_info, data_size, 3138 false, NULL, 0); 3139 3140 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3141 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; 3142 else 3143 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3144 3145 rtnl_lock(); 3146 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 3147 rtnl_unlock(); 3148 } 3149 3150 static const struct hnae3_ae_ops hclgevf_ops = { 3151 .init_ae_dev = hclgevf_init_ae_dev, 3152 .uninit_ae_dev = hclgevf_uninit_ae_dev, 3153 .flr_prepare = hclgevf_flr_prepare, 3154 .flr_done = hclgevf_flr_done, 3155 .init_client_instance = hclgevf_init_client_instance, 3156 .uninit_client_instance = hclgevf_uninit_client_instance, 3157 .start = hclgevf_ae_start, 3158 .stop = hclgevf_ae_stop, 3159 .client_start = hclgevf_client_start, 3160 .client_stop = hclgevf_client_stop, 3161 .map_ring_to_vector = hclgevf_map_ring_to_vector, 3162 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3163 .get_vector = hclgevf_get_vector, 3164 .put_vector = hclgevf_put_vector, 3165 .reset_queue = hclgevf_reset_tqp, 3166 .get_mac_addr = hclgevf_get_mac_addr, 3167 .set_mac_addr = hclgevf_set_mac_addr, 3168 .add_uc_addr = hclgevf_add_uc_addr, 3169 .rm_uc_addr = hclgevf_rm_uc_addr, 3170 .add_mc_addr = hclgevf_add_mc_addr, 3171 .rm_mc_addr = hclgevf_rm_mc_addr, 3172 .get_stats = hclgevf_get_stats, 3173 .update_stats = hclgevf_update_stats, 3174 .get_strings = hclgevf_get_strings, 3175 .get_sset_count = hclgevf_get_sset_count, 3176 .get_rss_key_size = hclgevf_get_rss_key_size, 3177 .get_rss_indir_size = hclgevf_get_rss_indir_size, 3178 .get_rss = hclgevf_get_rss, 3179 .set_rss = hclgevf_set_rss, 3180 .get_rss_tuple = hclgevf_get_rss_tuple, 3181 .set_rss_tuple = hclgevf_set_rss_tuple, 3182 .get_tc_size = hclgevf_get_tc_size, 3183 .get_fw_version = hclgevf_get_fw_version, 3184 .set_vlan_filter = hclgevf_set_vlan_filter, 3185 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3186 .reset_event = hclgevf_reset_event, 3187 .set_default_reset_request = hclgevf_set_def_reset_request, 3188 .set_channels = hclgevf_set_channels, 3189 .get_channels = hclgevf_get_channels, 3190 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3191 .get_regs_len = hclgevf_get_regs_len, 3192 .get_regs = hclgevf_get_regs, 3193 .get_status = hclgevf_get_status, 3194 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3195 .get_media_type = hclgevf_get_media_type, 3196 .get_hw_reset_stat = 
hclgevf_get_hw_reset_stat, 3197 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3198 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 3199 .set_gro_en = hclgevf_gro_en, 3200 .set_mtu = hclgevf_set_mtu, 3201 .get_global_queue_id = hclgevf_get_qid_global, 3202 .set_timer_task = hclgevf_set_timer_task, 3203 .get_link_mode = hclgevf_get_link_mode, 3204 .set_promisc_mode = hclgevf_set_promisc_mode, 3205 }; 3206 3207 static struct hnae3_ae_algo ae_algovf = { 3208 .ops = &hclgevf_ops, 3209 .pdev_id_table = ae_algovf_pci_tbl, 3210 }; 3211 3212 static int hclgevf_init(void) 3213 { 3214 pr_info("%s is initializing\n", HCLGEVF_NAME); 3215 3216 hnae3_register_ae_algo(&ae_algovf); 3217 3218 return 0; 3219 } 3220 3221 static void hclgevf_exit(void) 3222 { 3223 hnae3_unregister_ae_algo(&ae_algovf); 3224 } 3225 module_init(hclgevf_init); 3226 module_exit(hclgevf_exit); 3227 3228 MODULE_LICENSE("GPL"); 3229 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3230 MODULE_DESCRIPTION("HCLGEVF Driver"); 3231 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3232