// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}
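
/* Most VF -> PF queries below share one calling pattern:
 *   hclgevf_send_mbx_msg(hdev, code, subcode, data, data_len,
 *			  need_resp, resp_data, resp_len)
 * posts a mailbox request to the PF; when need_resp is true the call
 * waits for the PF's reply (or a mailbox timeout) and copies up to
 * resp_len bytes of the answer into resp_data.
 */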

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	u8 resp_msg;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				   HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
				   NULL, 0, true, &resp_msg, sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0,
				   true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2], resp_data[2];
	u16 qid_in_pf = 0;
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
				   sizeof(msg_data), true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	u8 resp_msg[2];
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
				   true, resp_msg, sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(resp_msg));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1
	u8 send_msg;
	u8 resp_msg;

	send_msg = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
			     &send_msg, sizeof(send_msg), false,
			     &resp_msg, sizeof(resp_msg));
	send_msg = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
			     &send_msg, sizeof(send_msg), false,
			     &resp_msg, sizeof(resp_msg));
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclgevf_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	unsigned int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
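		/* the hardware takes the per-TC queue region as an
		 * ilog2() exponent (tc_size) plus a starting offset,
		 * not as a raw queue count
		 */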
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8

	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
		  HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0,
					   &index, sizeof(index),
					   true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
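			/* any other hash function is not supported by
			 * the hardware
			 */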
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* update the shadow RSS key with the user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
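			/* the first ring of the chain also carries the
			 * request header: msg[0] holds the map/unmap
			 * opcode and msg[1] the target vector id
			 */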
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		    HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		    HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_bc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_bc_pmc ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
{
	return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
		  HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, sizeof(msg_data),
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN	5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting, firmware is unable to handle the
	 * mailbox. Just record the vlan id, and remove it after the
	 * reset finishes.
	 */
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				   HCLGE_MBX_VLAN_FILTER, msg_data,
				   HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);

	/* When removing the hw vlan filter fails, record the vlan id,
	 * and try to remove it from hw later, to be consistent
	 * with the stack.
	 */
1274 */ 1275 if (is_kill && ret) 1276 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1277 1278 return ret; 1279 } 1280 1281 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1282 { 1283 #define HCLGEVF_MAX_SYNC_COUNT 60 1284 struct hnae3_handle *handle = &hdev->nic; 1285 int ret, sync_cnt = 0; 1286 u16 vlan_id; 1287 1288 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1289 while (vlan_id != VLAN_N_VID) { 1290 ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1291 vlan_id, true); 1292 if (ret) 1293 return; 1294 1295 clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1296 sync_cnt++; 1297 if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1298 return; 1299 1300 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1301 } 1302 } 1303 1304 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1305 { 1306 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1307 u8 msg_data; 1308 1309 msg_data = enable ? 1 : 0; 1310 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1311 HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, 1312 1, false, NULL, 0); 1313 } 1314 1315 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1316 { 1317 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1318 u8 msg_data[2]; 1319 int ret; 1320 1321 memcpy(msg_data, &queue_id, sizeof(queue_id)); 1322 1323 /* disable vf queue before send queue reset msg to PF */ 1324 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 1325 if (ret) 1326 return ret; 1327 1328 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 1329 sizeof(msg_data), true, NULL, 0); 1330 } 1331 1332 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1333 { 1334 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1335 1336 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu, 1337 sizeof(new_mtu), true, NULL, 0); 1338 } 1339 1340 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1341 enum hnae3_reset_notify_type type) 1342 { 1343 struct hnae3_client *client = hdev->nic_client; 1344 struct hnae3_handle *handle = &hdev->nic; 1345 int ret; 1346 1347 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 1348 !client) 1349 return 0; 1350 1351 if (!client->ops->reset_notify) 1352 return -EOPNOTSUPP; 1353 1354 ret = client->ops->reset_notify(handle, type); 1355 if (ret) 1356 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1357 type, ret); 1358 1359 return ret; 1360 } 1361 1362 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 1363 { 1364 struct hclgevf_dev *hdev = ae_dev->priv; 1365 1366 set_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1367 } 1368 1369 static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev, 1370 unsigned long delay_us, 1371 unsigned long wait_cnt) 1372 { 1373 unsigned long cnt = 0; 1374 1375 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && 1376 cnt++ < wait_cnt) 1377 usleep_range(delay_us, delay_us * 2); 1378 1379 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { 1380 dev_err(&hdev->pdev->dev, 1381 "flr wait timeout\n"); 1382 return -ETIMEDOUT; 1383 } 1384 1385 return 0; 1386 } 1387 1388 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1389 { 1390 #define HCLGEVF_RESET_WAIT_US 20000 1391 #define HCLGEVF_RESET_WAIT_CNT 2000 1392 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1393 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1394 1395 u32 val; 1396 int ret; 1397 1398 /* wait to check the hardware reset completion status */ 1399 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by the PF. Yes,
	 * this also means we might end up waiting a bit more even for a VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		hdev->rst_stats.vf_func_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		hdev->rst_stats.flr_rst_cnt++;
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  HCLGEVF_NIC_CMQ_ENABLE);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if the device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->rst_stats.rst_cnt++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	hdev->rst_stats.hw_rst_done_cnt++;

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	hdev->rst_stats.rst_done_cnt++;

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	/* When VF reset failed, only the higher level reset asserted by PF
	 * can restore it, so re-initialize the command queue to receive
	 * this higher reset event.
	 */
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");
	if (hclgevf_is_reset_pending(hdev))
		hclgevf_reset_task_schedule(hdev);

	return ret;
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies +
		  HCLGEVF_GENERAL_TASK_INTERVAL * HZ);

	hdev->stats_timer++;
	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of the below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       a VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of the PF for this reset
		 * request. The PF will ack the 1b and 2 cases, but we will not
		 * get any intimation about 1a from the PF as the cmdq would be
		 * in an unreliable state i.e. mailbox communication between PF
		 * and VF would be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try to
		 * reset our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies +
		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
		return;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(respmsg));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hnae3_handle *handle;
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);
	handle = &hdev->nic;

	if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
		hclgevf_tqps_update_stats(handle);
		hdev->stats_timer = 0;
	}

	/* request the link status from the PF. The PF would be able to tell
	 * the VF about such updates in the future, so we might remove this
	 * later.
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		hdev->rst_stats.vf_rst_cnt++;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	ret = hclgevf_get_pf_media_type(hdev);
	if (ret)
		return ret;

	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->reset_type != HNAE3_VF_RESET)
		for (i = 0; i < handle->kinfo.num_tqps; i++)
			if (hclgevf_reset_tqp(handle, i))
				break;

	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

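/* HCLGE_MBX_SET_ALIVE messages let the VF report liveness, presumably so the
 * PF can track it: client_start marks the VF alive and arms keep_alive_timer
 * (period HCLGEVF_KEEP_ALIVE_TASK_INTERVAL seconds), client_stop marks it
 * dead and tears the timer down. The message carries a single data byte,
 * 1 for alive and 0 for not, and no response is requested.
 */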
static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = alive ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
				    0, &msg_data, 1, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, true);
	if (ret)
		return ret;

	mod_timer(&hdev->keep_alive_timer, jiffies +
		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);

	return 0;
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);

	del_timer_sync(&hdev->keep_alive_timer);
	cancel_work_sync(&hdev->keep_alive_task);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

	if (hdev->keep_alive_timer.function)
		del_timer_sync(&hdev->keep_alive_timer);
	if (hdev->keep_alive_task.func)
		cancel_work_sync(&hdev->keep_alive_task);
	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

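/* Interrupt allocation: on RoCE-capable devices the NIC vectors must sit
 * below the fixed RoCE MSI-X offset, so at least roce_base_msix_offset + 1
 * MSI-X vectors are required and plain MSI is not an option; NIC-only
 * devices may fall back from MSI-X to MSI. Getting fewer vectors than
 * requested is tolerated with a warning.
 */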
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %d\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}

static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					    struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	ret = client->ops->init_instance(&hdev->nic);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->nic))
		hclgevf_info_show(hdev);

	return 0;
}

static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}

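/* Client registration order is not fixed: the RoCE instance is only brought
 * up once both the NIC and RoCE clients are registered, so whichever of the
 * two registers last triggers the RoCE init
 * (hclgevf_init_roce_client_instance returns 0 early until then).
 */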
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = hclgevf_init_nic_client_instance(ae_dev, client);
		if (ret)
			goto clear_nic;

		ret = hclgevf_init_roce_client_instance(ae_dev,
							hdev->roce_client);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		ret = hclgevf_init_roce_client_instance(ae_dev, client);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

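/* The VF resource query returns, among other things, the number of interrupt
 * vectors granted to this function (vf_intr_vector_number) and, on
 * RoCE-capable hardware, the MSI-X offset at which the RoCE vectors start
 * (carried in msixcap_localid_ba_rocee).
 */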
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and RoCE vectors, and the NIC
		 * vectors are queued before the RoCE vectors. The offset is
		 * fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd init failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf resource error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* The VF is not allowed to enable unicast/multicast promisc mode.
	 * For revision 0x20, broadcast promisc mode defaults to disabled and
	 * the firmware makes sure broadcast packets can still be accepted.
	 * For revision 0x21, broadcast promisc mode defaults to enabled.
	 */
	ret = hclgevf_set_promisc_mode(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	hdev = ae_dev->priv;
	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				   u8 *module_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_num, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetch per-VF register values from the VF PCIe register space */
	reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();

	/* send msg to PF and wait for the port based vlan info to be updated */
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
			     HCLGE_MBX_PORT_BASE_VLAN_CFG,
			     port_base_vlan_info, data_size,
			     false, NULL, 0);

	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);