// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

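/* Refresh the per-queue RX/TX packet counters by issuing one query
 * command per direction for every TQP owned by this handle; the queue
 * index is masked to the 9 bits the command expects.
 */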
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

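/* Ask the PF for the current port-based VLAN filter state; the state
 * comes back as a single byte in the mailbox response.
 */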
static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	u8 resp_msg;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				   HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
				   NULL, 0, true, &resp_msg, sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0,
				   true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2], resp_data[2];
	u16 qid_in_pf = 0;
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
				   sizeof(msg_data), true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	u8 resp_msg[2];
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
				   true, resp_msg, sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
				 i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(resp_msg));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1
	u8 send_msg;
	u8 resp_msg;

	send_msg = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
			     &send_msg, sizeof(send_msg), false,
			     &resp_msg, sizeof(resp_msg));
	send_msg = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
			     &send_msg, sizeof(send_msg), false,
			     &resp_msg, sizeof(resp_msg));
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

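/* Hand out MSI-X vectors to the client. Vector 0 is reserved for the
 * misc (mailbox/reset) interrupt, so the search starts just after it;
 * each hit records the IRQ number and the per-vector register base.
 */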
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset = 0;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

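/* Program per-TC RSS parameters: each valid TC gets an offset of
 * rss_size * tc, and the size field holds log2 of the rounded-up
 * rss_size since the hardware expects a power-of-two queue span.
 */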
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8

	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
		  HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0,
					   &index, sizeof(index),
					   true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

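/* ethtool .get_rxfh backend: revision 0x21 keeps its own hash algorithm
 * and key in the shadow rss_cfg, while revision 0x20 always reports
 * Toeplitz and pulls the PF's key over the mailbox on demand.
 */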
static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* update the shadow RSS key with the user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

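/* ethtool .set_rxnfc backend: the command rewrites every tuple field at
 * once, so preload the descriptor from the shadow copy, overwrite only
 * the flow type being changed, then sync the shadow back on success.
 */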
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

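/* Walk the ring chain and pack (ring type, tqp index, GL index) triples
 * into VF-to-PF mailbox messages, flushing a message whenever it fills
 * up or the chain ends; msg[2] carries the number of triples sent.
 */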
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
				    HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
			   HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
			  HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector = %d, ret = %d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

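/* The VF cannot flip promiscuous mode on its own; it can only ask the
 * PF over the mailbox to enable or disable broadcast promiscuity for it.
 */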
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_bc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_bc_pmc ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
{
	return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

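/* MAC changes go through the PF: the message carries the new address
 * followed by the old one, and the subcode distinguishes a first-time
 * add from a modify so the PF can validate the old entry.
 */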
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			     HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, sizeof(msg_data),
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

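/* VLAN filter updates are forwarded to the PF as a 5-byte message:
 * an is_kill flag, the 16-bit VLAN id, then the 16-bit protocol
 * (802.1Q only).
 */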
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN	5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(msg_data, &queue_id, sizeof(queue_id));

	/* disable the VF queue before sending the queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    sizeof(msg_data), true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev,
			"flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

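/* Wait for hardware reset completion: FLR completion is signalled by
 * the stack setting HNAE3_FLR_DONE, while other resets are tracked by
 * polling the HCLGEVF_RST_ING register until its reset bits clear.
 */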
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* Wait a bit more for the stack reset to complete. This can happen
	 * when the reset was asserted by the PF, and it also means we may
	 * end up waiting a bit longer even for a VF reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		hdev->rst_stats.vf_func_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		hdev->rst_stats.flr_rst_cnt++;
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  HCLGEVF_NIC_CMQ_ENABLE);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->rst_stats.rst_cnt++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	hdev->rst_stats.hw_rst_done_cnt++;

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	hdev->rst_stats.rst_done_cnt++;

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	/* When VF reset failed, only the higher level reset asserted by PF
	 * can restore it, so re-initialize the command queue to receive
	 * this higher reset event.
	 */
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");
	if (hclgevf_is_reset_pending(hdev))
		hclgevf_reset_task_schedule(hdev);

	return ret;
}

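/* Pick the highest-priority pending reset (VF > full > PF-func > func >
 * FLR) and clear every bit it subsumes, so lesser resets are not
 * replayed after the bigger one completes.
 */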
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

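/* Vector 0 is the misc interrupt (mailbox and reset events); claim it
 * here and account for it in the MSI bookkeeping.
 */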
static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies +
		  HCLGEVF_GENERAL_TASK_INTERVAL * HZ);

	hdev->stats_timer++;
	hclgevf_task_schedule(hdev);
}

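/* Reset work has two triggers: HCLGEVF_RESET_PENDING means the PF has
 * already asserted a reset and we drain reset_pending level by level;
 * HCLGEVF_RESET_REQUESTED means this VF asked for one itself and we
 * escalate to a full reset after repeated failed attempts.
 */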
static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of PF for this reset request.
		 * PF will ack the 1b and 2 cases, but we will not get any
		 * intimation about 1a from PF as cmdq would be in unreliable
		 * state i.e. mailbox communication between PF and VF would be
		 * broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for case 2, but for case 1 we can try
		 * resetting our PCIe + stack and see if it alleviates the
		 * problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies +
		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
		return;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(respmsg));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

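/* Periodic service work: refresh queue stats every
 * HCLGEVF_STATS_TIMER_INTERVAL ticks, poll the PF for link status and
 * link modes, then kick any deferred mailbox or reset work.
 */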
static void hclgevf_service_task(struct work_struct *work)
{
	struct hnae3_handle *handle;
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);
	handle = &hdev->nic;

	if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
		hclgevf_tqps_update_stats(handle);
		hdev->stats_timer = 0;
	}

	/* Request the link status from the PF. The PF may be able to push
	 * such updates to the VF in the future, so this polling might be
	 * removed later.
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

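/* Decode the vector 0 interrupt source from the cmdq source register:
 * reset events take priority over mailbox (CMDQ RX) events; anything
 * else is reported as "other" and left unhandled.
 */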
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		hdev->rst_stats.vf_rst_cnt++;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	ret = hclgevf_get_pf_media_type(hdev);
	if (ret)
		return ret;

	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

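/* GRO offload is configured through a firmware command and silently
 * skipped on hardware that does not advertise GRO support.
 */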
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* Other VLAN configuration (e.g. VLAN TX/RX offload) will also be
	 * added here later.
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->reset_type != HNAE3_VF_RESET)
		for (i = 0; i < handle->kinfo.num_tqps; i++)
			if (hclgevf_reset_tqp(handle, i))
				break;

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = alive ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
				    0, &msg_data, 1, false, NULL, 0);
}
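/* Keep-alive protocol: the VF tells the PF whether it is alive via
 * HCLGE_MBX_SET_ALIVE. client_start below arms the keep-alive timer
 * only after the first "alive" message succeeds; client_stop sends
 * "not alive" on a best-effort basis (failure is only a warning) and
 * then tears down the timer and its work item.
 */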
static int hclgevf_client_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, true);
	if (ret)
		return ret;

	mod_timer(&hdev->keep_alive_timer, jiffies +
		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);

	return 0;
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);

	del_timer_sync(&hdev->keep_alive_timer);
	cancel_work_sync(&hdev->keep_alive_task);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

	if (hdev->keep_alive_timer.function)
		del_timer_sync(&hdev->keep_alive_timer);
	if (hdev->keep_alive_task.func)
		cancel_work_sync(&hdev->keep_alive_task);
	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}
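/* Vector layout assumed below: the NIC vectors occupy the low MSI-X
 * entries and the RoCE vectors start at roce_base_msix_offset, so a
 * RoCE-capable device must get at least roce_base_msix_offset + 1
 * vectors and is restricted to MSI-X (a fixed offset into the vector
 * table only makes sense there). Without RoCE, anything from 1 to
 * num_msi vectors, MSI or MSI-X, will do.
 */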
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %d\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}

static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					    struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	ret = client->ops->init_instance(&hdev->nic);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->nic))
		hclgevf_info_show(hdev);

	return 0;
}

static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}
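/* Client bring-up order matters: the NIC client must be initialized
 * before the RoCE client, because the RoCE handle borrows the netdev
 * and vector layout from the NIC side (see
 * hclgevf_init_roce_base_info() above). That is why
 * hclgevf_init_roce_client_instance() quietly returns 0 until both
 * clients have been registered.
 */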
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = hclgevf_init_nic_client_instance(ae_dev, client);
		if (ret)
			goto clear_nic;

		ret = hclgevf_init_roce_client_instance(ae_dev,
							hdev->roce_client);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		ret = hclgevf_init_roce_client_instance(ae_dev, client);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
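/* The resource query below decodes bit-fields from the firmware
 * response with hnae3_get_field(origin, mask, shift), which boils down
 * to (((origin) & (mask)) >> (shift)). A hedged sketch with made-up
 * values (the real masks live in the hclgevf headers):
 *
 *	u16 word = 0x1234;
 *	u16 lo = hnae3_get_field(word, GENMASK(7, 0), 0);	// 0x34
 *	u16 hi = hnae3_get_field(word, GENMASK(15, 8), 8);	// 0x12
 */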
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* The VF has both NIC and RoCE vectors; the NIC vectors
		 * come before the RoCE vectors. The offset is fixed at 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd init failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}
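/* hclgevf_reset_hdev() above is the lightweight re-init path used while
 * recovering from a reset: IRQs are torn down and rebuilt only for a
 * full VF reset, then the command queue, RSS, GRO and VLAN state are
 * restored. hclgevf_init_hdev() below is the full cold-start path.
 */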
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf resource error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* The VF is not allowed to enable unicast/multicast promisc mode.
	 * For revision 0x20, broadcast promisc mode is disabled by default;
	 * the firmware makes sure broadcast packets can still be accepted.
	 * For revision 0x21, broadcast promisc mode is enabled by default.
	 */
	ret = hclgevf_set_promisc_mode(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	/* shut the command queue down before unmapping the BAR it uses,
	 * matching the error-path order in hclgevf_init_hdev()
	 */
	hclgevf_cmd_uninit(hdev);
	hclgevf_pci_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	hdev = ae_dev->priv;
	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}
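/* The limit above is min(rss_size_max, num_tqps / num_tc): the RSS
 * engine caps how many queues one TC can spread across, and the TQPs
 * are shared evenly between TCs. For example, with 16 TQPs, 4 TCs and
 * an rss_size_max of 8, at most 16 / 4 = 4 combined channels can be
 * reported.
 */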
/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				   u8 *module_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}
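/* The dump length above and the dump below must agree on padding: each
 * register block is rounded up to whole 4-u32 lines and padded with
 * SEPARATOR_VALUE words. Worked example for the 14-entry cmdq list:
 * hclgevf_get_regs_len() reserves 14 * 4 / 16 + 1 = 4 lines (16 u32),
 * and hclgevf_get_regs() emits 14 register values plus
 * 4 - (14 % 4) = 2 separators, i.e. the same 16 u32.
 */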
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_num, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetch the per-VF register values from the VF PCIe register space */
	reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();

	/* send msg to PF and wait for the port based VLAN info to update */
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
			     HCLGE_MBX_PORT_BASE_VLAN_CFG,
			     port_base_vlan_info, data_size,
			     false, NULL, 0);

	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}
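/* Port based VLAN updates arrive from the PF while traffic may be
 * flowing, so the function above quiesces the client
 * (HNAE3_DOWN_CLIENT) under rtnl_lock before asking the PF to switch
 * state, and brings the client back up (HNAE3_UP_CLIENT) only after
 * the new state has been recorded.
 */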
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);