// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclge_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}
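
/* Ask the PF, over the mailbox, to report the current link status. The
 * request is sent without waiting for a response; the PF's answer arrives
 * asynchronously and is applied through hclgevf_update_link_status().
 */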
static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}
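
/* Program the per-TC RSS mode. For each hardware TC the command carries a
 * valid bit, the TC's queue offset, and its size encoded as the log2 of
 * rss_size rounded up to a power of two.
 */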
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			     (tc_valid[i] & 0x1));
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
			       HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
			       HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}
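
/* Read the RSS configuration back from hardware. The hash key is returned in
 * up to three descriptor reads of HCLGEVF_RSS_HASH_KEY_NUM bytes each, with
 * the last read carrying whatever remains of HCLGEVF_RSS_KEY_SIZE; the hash
 * algorithm is taken from the low bits of hash_config.
 */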
static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
				  u8 *key)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_config_cmd *req;
	int lkup_times = key ? 3 : 1;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int status;

	req = (struct hclgevf_rss_config_cmd *)desc.data;
	lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);

	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     true);
		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"failed to get hardware RSS cfg, status = %d\n",
				status);
			return status;
		}

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		if (key)
			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
			       req->hash_key,
			       key_size);
	}

	if (hash) {
		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
			*hash = ETH_RSS_HASH_TOP;
		else
			*hash = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
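
/* Map (en == true) or unmap the rings in @ring_chain to/from @vector by
 * sending a VF-to-PF mailbox command. msg[0] carries the opcode, msg[1] the
 * VF-local vector id and msg[2] the number of rings in the message; each
 * ring then takes three bytes (ring type, tqp index, GL index). When the
 * message buffer fills up, the command is sent and packing restarts.
 */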
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
#define HCLGEVF_RING_NODE_VARIABLE_NUM		3
#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM	3
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i, vector_id;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	type = en ?
		HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	req->msg[0] = type;
	req->msg[1] = vector_id; /* vector_id should be id in VF */

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		i++;
		/* msg[2] is the number of rings packed into this message */
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
				hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
				node->tqp_index;
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 2] =
				hnae_get_field(node->int_gl_idx,
					       HNAE3_RING_GL_IDX_M,
					       HNAE3_RING_GL_IDX_S);

		if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
			  HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
			 HCLGEVF_RING_NODE_VARIABLE_NUM) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	if (i > 0) {
		req->msg[2] = i;

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
	if (ret) {
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);
		return ret;
	}

	hclgevf_free_vector(hdev, vector);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg[2] = {0};

	msg[0] = en;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
				    msg, 1, false, NULL, 0);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      HCLGE_MBX_MAC_VLAN_UC_MODIFY,
				      msg_data, ETH_ALEN * 2,
				      false, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}
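
/* MAC filter updates: the VF does not program MAC table entries itself, it
 * forwards add/remove requests to the PF through mailbox messages and does
 * not wait for a response.
 */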
static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
			     NULL, 0);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}
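
/* Work handler for the mailbox service task: drains pending PF-to-VF mailbox
 * messages. The HCLGEVF_STATE_MBX_HANDLING bit ensures only one instance of
 * the handler runs at a time.
 */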
static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	/* schedule the VF mailbox service task, if not already scheduled */
	hclgevf_mbx_task_schedule(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclge_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector =
		hdev->vector_status[hdev->num_msi_used];

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < hdev->num_tqps; i++) {
		/* Ring disable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_instance(struct hclgevf_dev *hdev,
				 struct hnae3_client *client)
{
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;
		break;
	case HNAE3_CLIENT_ROCE:
		hdev->roce_client = client;
		hdev->roce.client = client;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
				    struct hnae3_client *client)
{
	/* un-init roce, if it exists */
	if (hdev->roce_client)
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);

	/* un-init nic/unic, if this was not called by roce client */
	if ((client->ops->uninit_instance) &&
	    (client->type != HNAE3_CLIENT_ROCE))
		client->ops->uninit_instance(&hdev->nic, 0);
}

static int hclgevf_register_client(struct hnae3_client *client,
				   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	return hclgevf_init_instance(hdev, client);
}

static void hclgevf_unregister_client(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_instance(hdev, client);
}
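
/* PCI bring-up for the VF: enable the device, set the 64-bit DMA mask,
 * claim the regions and map BAR 2 into hw->io_base, which the rest of the
 * driver uses for vector and TQP register access.
 */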
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);
	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_irq_init;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize VF's MTA */
	hdev->accept_mta_mc = true;
	ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to set mta filter mode\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_irq_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_cmd_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_register_client,
	.uninit_client_instance = hclgevf_unregister_client,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.get_channels = hclgevf_get_channels,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.name = HCLGEVF_NAME,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	return hnae3_register_ae_algo(&ae_algovf);
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);