// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}
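/* NOTE: hclgevf_tqps_get_strings() above emits all TX queue names first
 * and then all RX queue names; hclgevf_tqps_get_stats() fills the stats
 * buffer in the same TX-then-RX order, which keeps the ethtool names and
 * values lined up.
 */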
static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}
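/* The PF answers HCLGE_MBX_GET_QINFO with an 8-byte payload holding four
 * consecutive u16 values: num_tqps, rss_size_max, num_desc and rx_buf_len.
 * memcpy() is used below since the raw byte buffer gives no alignment
 * guarantee for direct u16 loads.
 */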
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	/* if this is an ongoing reset then we need to re-allocate the TQPs,
	 * since we cannot assume we would get the same number of TQPs back
	 * from the PF
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, hdev->htqp);

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	/* if this is an ongoing reset then we need to re-allocate the hnae
	 * queues as well, since the number of TQPs from the PF might have
	 * changed.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, kinfo->tqp);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}
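/* Vector 0 is reserved for the misc/mailbox interrupt (see
 * hclgevf_get_misc_vector()), so the search below starts at
 * HCLGEVF_MISC_VECTOR_NUM + 1 and the I/O address is computed with an
 * (i - 1) offset into the vector register space.
 */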
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}
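/* Reading the RSS configuration back takes up to three descriptor reads:
 * each HCLGEVF_OPC_RSS_GENERIC_CONFIG response carries
 * HCLGEVF_RSS_HASH_KEY_NUM key bytes and the third read returns only the
 * remainder of the HCLGEVF_RSS_KEY_SIZE-byte key. When only the hash
 * algorithm is wanted, a single read is enough.
 */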
static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
				  u8 *key)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_config_cmd *req;
	int lkup_times = key ? 3 : 1;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int status;

	req = (struct hclgevf_rss_config_cmd *)desc.data;
	lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);

	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     true);
		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"failed to get hardware RSS cfg, status = %d\n",
				status);
			return status;
		}

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		if (key)
			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
			       req->hash_key,
			       key_size);
	}

	if (hash) {
		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
			*hash = ETH_RSS_HASH_TOP;
		else
			*hash = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
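/* A VF-to-PF ring-map request packs a variable number of rings into one
 * mailbox command: msg[0] carries the (un)map opcode, msg[1] the vector id
 * and msg[2] the ring count, followed by HCLGE_MBX_RING_NODE_VARIABLE_NUM
 * bytes per ring (ring type, tqp index, GL index). When the message fills
 * up it is sent and a fresh descriptor is started for the remaining rings.
 */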
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
					HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		    HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		    HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				     bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
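/* A unicast MAC change sends both addresses to the PF: the new MAC in the
 * first ETH_ALEN bytes and the old one right after it, so the PF can
 * replace the existing table entry. On the very first configuration there
 * is no old entry yet, hence the ADD rather than MODIFY subcode. The
 * shadow copy in hdev->hw.mac is only updated once the PF accepts the
 * request.
 */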
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return;

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
			     2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	return client->ops->reset_notify(handle, type);
}
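/* Poll the function-reset status register until the hardware reports
 * completion, checking every HCLGEVF_RESET_WAIT_MS milliseconds for at
 * most HCLGEVF_RESET_WAIT_CNT iterations (roughly 10 seconds in total)
 * before giving up with -EBUSY.
 */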
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS	500
#define HCLGEVF_RESET_WAIT_CNT	20
	u32 val, cnt = 0;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
	while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
		msleep(HCLGEVF_RESET_WAIT_MS);
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
		cnt++;
	}

	/* hardware completion status should be available by this time */
	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "couldn't get reset done status from h/w, timeout!\n");
		return -EBUSY;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by PF. Yes,
	 * this also means we might end up waiting a bit more even for a VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

	/* re-initialize the hclge device */
	ret = hclgevf_init_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

	return 0;
}
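/* Complete VF reset sequence, run under the RTNL lock: bring the nic
 * client down, wait for the hardware reset to finish, tear down and
 * re-initialize the stack (client + hclgevf device), then bring the
 * client back up.
 */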
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	int ret;

	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);

		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

		rtnl_unlock();
		return ret;
	}

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

	/* bring up the nic to enable TX/RX again */
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

	rtnl_unlock();

	return ret;
}

static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
	int status;
	u8 respmsg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
				      0, false, &respmsg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF reset request to PF failed(=%d)\n", status);

	return status;
}

static void hclgevf_reset_event(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	handle->reset_level = HNAE3_VF_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	handle->last_reset_time = jiffies;
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		ret = hclgevf_reset(hdev);
		if (ret)
			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of PF for this reset request.
		 * PF will ack the 1b and 2. cases but we will not get any
		 * intimation about 1a from PF as cmdq would be in unreliable
		 * state i.e. mailbox communication between PF and VF would be
		 * broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			/* request PF for resetting this VF via mailbox */
			ret = hclgevf_do_reset(hdev);
			if (ret)
				dev_warn(&hdev->pdev->dev,
					 "VF rst fail, stack will call\n");
		}
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}
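/* Misc (vector 0) interrupt handler: mask the vector, dispatch the mailbox
 * event if one is pending, acknowledge the handled source bits and unmask
 * the vector again. Unknown sources are re-enabled without clearing
 * anything.
 */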
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	hclgevf_mbx_handler(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev = ae_dev->priv;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < kinfo->num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	for (i = 0; i < kinfo->num_tqps; i++) {
		/* Ring disable */
		queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return;

	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}
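/* MSI/MSI-X allocation: when RoCE is supported the vectors must be MSI-X
 * and at least roce_base_msix_offset + 1 of them, since the NIC vectors
 * sit below the fixed RoCE offset; otherwise plain MSI is acceptable as
 * well. Getting fewer vectors than requested is tolerated with a warning.
 */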
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}
	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	/* check if we need to skip initialization of pci. This will happen if
	 * device is undergoing VF reset. Otherwise, we would need to
	 * re-initialize pci interface again i.e. when device is not going
	 * through *any* reset or actually undergoing full reset.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and RoCE vectors, NIC vectors
		 * are queued before RoCE vectors. The offset is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}
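/* hclgevf_init_hdev() doubles as the re-init path during a VF reset: the
 * hclgevf_dev_ongoing_reset() checks inside the pci/msi/irq/state init
 * helpers turn those steps into no-ops, so only the parts invalidated by
 * the reset are rebuilt. A full reset (PCIe included) instead tears the
 * whole device down first.
 */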
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	/* check if device is on-going full reset(i.e. pcie as well) */
	if (hclgevf_dev_ongoing_full_reset(hdev)) {
		dev_warn(&pdev->dev, "device is undergoing full reset\n");
		hclgevf_uninit_hdev(hdev);
	}

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_query_vf;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_query_vf;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_query_vf:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);