// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

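	/* rx queue strings follow the tx queue strings; this ordering must
	 * match the counter ordering produced by hclgevf_tqps_get_stats()
	 */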
	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclge_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	/* the 8-byte response holds four consecutive u16 fields */
	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	/* if this is an ongoing reset then we need to re-allocate the TQPs,
	 * since we cannot assume we would get the same number of TQPs back
	 * from the PF
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, hdev->htqp);

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
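	/* each enabled TC gets an equal share of the TQPs, so the usable
	 * TQP count is rss_size * num_tc, capped at what the PF granted us
	 */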
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	/* if this is an ongoing reset then we need to re-allocate the hnae
	 * queues as well, since the number of TQPs from the PF might have
	 * changed.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, kinfo->tqp);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
				  u8 *key)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_config_cmd *req;
	int lkup_times = key ? 3 : 1;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int status;

	req = (struct hclgevf_rss_config_cmd *)desc.data;
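	/* reading the whole hash key takes three descriptor reads (the key is
	 * returned HCLGEVF_RSS_HASH_KEY_NUM bytes at a time); the hash
	 * algorithm alone needs only the first read
	 */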
	lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);

	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     true);
		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"failed to get hardware RSS cfg, status = %d\n",
				status);
			return status;
		}

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		if (key)
			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
			       req->hash_key,
			       key_size);
	}

	if (hash) {
		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
			*hash = ETH_RSS_HASH_TOP;
		else
			*hash = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
					HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

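		/* the message header is built once per descriptor: msg[0]
		 * holds the map/unmap opcode, msg[1] the vector id and msg[2]
		 * the ring count; each ring then contributes a (type,
		 * tqp index, GL index) triple starting at idx_offset
		 */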
		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		    HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		    HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
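	/* msg[0] selects the mailbox opcode; msg[1] and msg[2] carry the
	 * requested unicast and multicast promiscuous states respectively
	 */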
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				     bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
{
	u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				   HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
				   NULL, 0, true, &resp_msg, sizeof(u8));

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read mta type fail, ret=%d.\n", ret);
		return ret;
	}

	if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
		dev_err(&hdev->pdev->dev,
			"Read mta type invalid, resp=%d.\n", resp_msg);
		return -EINVAL;
	}

	hdev->mta_mac_sel_type = resp_msg;

	return 0;
}

static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
					     const u8 *addr)
{
	u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
	u16 high_val = addr[1] | (addr[0] << 8);

	return (high_val >> rsh) & 0xfff;
}

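/* Push the local MTA shadow bitmap to the PF. The table is streamed in
 * 13-byte chunks: byte 0 of each mailbox message carries the chunk index in
 * bits 0-6 and an end-of-table flag in bit 7, and the remaining bytes carry
 * the bitmap payload.
 */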
static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
					unsigned long *status)
{
#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
#define HCLGEVF_MTA_STATUS_MSG_BITS \
			(HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
			(HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
	u16 tbl_cnt;
	u16 tbl_idx;
	u8 msg_cnt;
	u8 msg_idx;
	int ret;

	msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
			       HCLGEVF_MTA_STATUS_MSG_BITS);
	tbl_idx = 0;
	msg_idx = 0;
	while (msg_cnt--) {
		u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
		u8 *p = &msg[1];
		u8 msg_ofs;
		u8 msg_bit;

		memset(msg, 0, sizeof(msg));

		/* set index field */
		msg[0] = 0x7F & msg_idx;

		/* set end flag field */
		if (msg_cnt == 0) {
			msg[0] |= 0x80;
			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
		} else {
			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
		}

		/* set status field */
		msg_ofs = 0;
		msg_bit = 0;
		while (tbl_cnt--) {
			if (test_bit(tbl_idx, status))
				p[msg_ofs] |= BIT(msg_bit);

			tbl_idx++;

			msg_bit++;
			if (msg_bit == BITS_PER_BYTE) {
				msg_bit = 0;
				msg_ofs++;
			}
		}

		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
					   HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
					   msg, sizeof(msg), false, NULL, 0);
		if (ret)
			break;

		msg_idx++;
	}

	return ret;
}

static int hclgevf_update_mta_status(struct hnae3_handle *handle)
{
	unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct net_device *netdev = hdev->nic.kinfo.netdev;
	struct netdev_hw_addr *ha;
	u16 tbl_idx;

	/* clear status */
	memset(mta_status, 0, sizeof(mta_status));

	/* update status from mc addr list */
	netdev_for_each_mc_addr(ha, netdev) {
		tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
		set_bit(tbl_idx, mta_status);
	}

	return hclgevf_do_update_mta_status(hdev, mta_status);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

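/* The VLAN filter mailbox message is five bytes: byte 0 is the kill flag,
 * bytes 1-2 hold the VLAN id and bytes 3-4 the protocol, both copied
 * verbatim from their in-memory representation.
 */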
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return;

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
			     2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	return client->ops->reset_notify(handle, type);
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS	500
#define HCLGEVF_RESET_WAIT_CNT	20
	u32 val, cnt = 0;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
	while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
		msleep(HCLGEVF_RESET_WAIT_MS);
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
		cnt++;
	}

	/* hardware completion status should be available by this time */
	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "couldn't get reset done status from h/w, timeout!\n");
		return -EBUSY;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by the PF. Yes,
	 * this also means we might end up waiting a bit more even for a VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

	/* re-initialize the hclge device */
	ret = hclgevf_init_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

	return 0;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	int ret;

	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);

		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

		rtnl_unlock();
		return ret;
	}

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

	/* bring up the nic to enable TX/RX again */
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

	rtnl_unlock();

	return ret;
}

static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
	int status;
	u8 respmsg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
				      0, false, &respmsg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF reset request to PF failed(=%d)\n", status);

	return status;
}

static void hclgevf_reset_event(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	handle->reset_level = HNAE3_VF_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	handle->last_reset_time = jiffies;
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

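/* only queue the mailbox work if it is neither already queued nor running;
 * the SCHED/HANDLING state bits act as the re-entry guard
 */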
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		ret = hclgevf_reset(hdev);
		if (ret)
			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer (for example,
		 *       link layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of PF for this reset request.
		 * PF will ack the 1b and 2 cases but we will not get any
		 * intimation about 1a from PF as cmdq would be in unreliable
		 * state i.e. mailbox communication between PF and VF would be
		 * broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			/* request PF for resetting this VF via mailbox */
			ret = hclgevf_do_reset(hdev);
			if (ret)
				dev_warn(&hdev->pdev->dev,
					 "VF rst fail, stack will call\n");
		}
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

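/* enable/disable the misc (vector 0) interrupt by writing to the vector's
 * control address recorded in hclgevf_get_misc_vector()
 */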
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	hclgevf_mbx_handler(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclge_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev = ae_dev->priv;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config (like VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	for (i = 0; i < hdev->num_tqps; i++) {
		/* Ring disable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return;

	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	/* check if we need to skip initialization of pci. This will happen if
	 * device is undergoing VF reset. Otherwise, we would need to
	 * re-initialize pci interface again i.e. when device is not going
	 * through *any* reset or actually undergoing full reset.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and Roce vectors, NIC vectors
		 * are queued before Roce vectors. The offset is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	/* check if device is ongoing full reset (i.e. pcie as well) */
	if (hclgevf_dev_ongoing_full_reset(hdev)) {
		dev_warn(&pdev->dev, "device is undergoing full reset\n");
		hclgevf_uninit_hdev(hdev);
	}

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_query_vf;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_query_vf;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize mta type for this VF */
	ret = hclgevf_cfg_func_mta_type(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize MTA type\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_query_vf:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *free_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*free_tqps = 0;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.update_mta_status = hclgevf_update_mta_status,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);