// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->num_tqps * 2;
}
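
/* fill in one ethtool stats string per TQP, TX queues first and then RX
 * queues, and return the advanced buffer position
 */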
static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQP stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	/* if this is an ongoing reset then we need to re-allocate the TQPs
	 * since we cannot assume we would get the same number of TQPs back
	 * from the PF
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, hdev->htqp);

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
				 i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
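
/* derive the KNIC handle's TC count, RSS size and queue layout from the
 * queue/TC configuration fetched from the PF
 */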
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	/* if this is an ongoing reset then we need to re-allocate the hnae
	 * queues as well since the number of TQPs from the PF might have
	 * changed.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, kinfo->tqp);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}
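
/* push the shadow RSS indirection table to hardware, one config-table
 * sized chunk per command descriptor
 */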
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			     (tc_valid[i] & 0x1));
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
			       HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
			       HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}
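
/* read the RSS hash algorithm and/or hash key back from hardware; the key
 * is fetched in HCLGEVF_RSS_HASH_KEY_NUM sized pieces over several lookups
 */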
static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
				  u8 *key)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_config_cmd *req;
	int lkup_times = key ? 3 : 1;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int status;

	req = (struct hclgevf_rss_config_cmd *)desc.data;
	lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);

	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     true);
		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"failed to get hardware RSS cfg, status = %d\n",
				status);
			return status;
		}

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		if (key)
			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
			       req->hash_key,
			       key_size);
	}

	if (hash) {
		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
			*hash = ETH_RSS_HASH_TOP;
		else
			*hash = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
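
/* (un)map a chain of rings to/from an interrupt vector by sending one or
 * more mailbox messages to the PF
 */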
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
				    HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
							  HNAE3_RING_GL_IDX_M,
							  HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_free_vector(hdev, vector);

	return 0;
}
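
/* request the PF to switch unicast/multicast promiscuous mode for this VF */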
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				     bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
{
	u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				   HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
				   NULL, 0, true, &resp_msg, sizeof(u8));

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read mta type fail, ret=%d.\n", ret);
		return ret;
	}

	if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
		dev_err(&hdev->pdev->dev,
			"Read mta type invalid, resp=%d.\n", resp_msg);
		return -EINVAL;
	}

	hdev->mta_mac_sel_type = resp_msg;

	return 0;
}

static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
					     const u8 *addr)
{
	u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
	u16 high_val = addr[1] | (addr[0] << 8);

	return (high_val >> rsh) & 0xfff;
}
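
/* transfer the multicast table (MTA) status bitmap to the PF in mailbox
 * messages carrying HCLGEVF_MTA_STATUS_MSG_SIZE status bytes each
 */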
static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
					unsigned long *status)
{
#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
#define HCLGEVF_MTA_STATUS_MSG_BITS \
			(HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
			(HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
	u16 tbl_cnt;
	u16 tbl_idx;
	u8 msg_cnt;
	u8 msg_idx;
	int ret;

	msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
			       HCLGEVF_MTA_STATUS_MSG_BITS);
	tbl_idx = 0;
	msg_idx = 0;
	while (msg_cnt--) {
		u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
		u8 *p = &msg[1];
		u8 msg_ofs;
		u8 msg_bit;

		memset(msg, 0, sizeof(msg));

		/* set index field */
		msg[0] = 0x7F & msg_idx;

		/* set end flag field */
		if (msg_cnt == 0) {
			msg[0] |= 0x80;
			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
		} else {
			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
		}

		/* set status field */
		msg_ofs = 0;
		msg_bit = 0;
		while (tbl_cnt--) {
			if (test_bit(tbl_idx, status))
				p[msg_ofs] |= BIT(msg_bit);

			tbl_idx++;

			msg_bit++;
			if (msg_bit == BITS_PER_BYTE) {
				msg_bit = 0;
				msg_ofs++;
			}
		}

		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
					   HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
					   msg, sizeof(msg), false, NULL, 0);
		if (ret)
			break;

		msg_idx++;
	}

	return ret;
}

static int hclgevf_update_mta_status(struct hnae3_handle *handle)
{
	unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct net_device *netdev = hdev->nic.kinfo.netdev;
	struct netdev_hw_addr *ha;
	u16 tbl_idx;

	/* clear status */
	memset(mta_status, 0, sizeof(mta_status));

	/* update status from mc addr list */
	netdev_for_each_mc_addr(ha, netdev) {
		tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
		set_bit(tbl_idx, mta_status);
	}

	return hclgevf_do_update_mta_status(hdev, mta_status);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
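
/* ask the PF to add (on first configuration) or modify the unicast MAC
 * address, and update the local copy on success
 */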
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			     HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}
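
/* enable/disable hardware VLAN tag stripping on RX via the PF */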
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return;

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
			     2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	return client->ops->reset_notify(handle, type);
}
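
/* poll the function-reset status register until hardware reports reset
 * completion or the wait budget is exhausted
 */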
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS	500
#define HCLGEVF_RESET_WAIT_CNT	20
	u32 val, cnt = 0;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
	while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
		msleep(HCLGEVF_RESET_WAIT_MS);
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
		cnt++;
	}

	/* hardware completion status should be available by this time */
	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "couldn't get reset done status from h/w, timeout!\n");
		return -EBUSY;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might be needed in case the reset was asserted by the PF.
	 * Yes, this also means we might end up waiting a bit more even for
	 * a VF reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

	/* re-initialize the hclge device */
	ret = hclgevf_init_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

	return 0;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	int ret;

	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);

		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

		rtnl_unlock();
		return ret;
	}

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

	/* bring up the nic to enable TX/RX again */
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

	rtnl_unlock();

	return ret;
}

static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
	int status;
	u8 respmsg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
				      0, false, &respmsg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF reset request to PF failed(=%d)\n", status);

	return status;
}

static void hclgevf_reset_event(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	handle->reset_level = HNAE3_VF_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	handle->last_reset_time = jiffies;
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}
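
/* schedule the mailbox service task unless it is already scheduled or
 * currently running
 */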
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}
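
/* work handler driving the VF reset state machine: wait for a pending
 * hardware reset to complete, or ask the PF to reset this VF
 */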
static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		ret = hclgevf_reset(hdev);
		if (ret)
			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       a VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. a problem in TX due to some other lower layer (e.g.
		 *       the link layer not functioning properly)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the PF's response to this reset request.
		 * The PF will ack cases 1b and 2, but we will not get any
		 * intimation about 1a from the PF as the cmdq would be in an
		 * unreliable state, i.e. mailbox communication between PF
		 * and VF would be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			/* request PF for resetting this VF via mailbox */
			ret = hclgevf_do_reset(hdev);
			if (ret)
				dev_warn(&hdev->pdev->dev,
					 "VF rst fail, stack will call\n");
		}
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}
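
/* mask/unmask the misc (vector 0) interrupt */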
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	hclgevf_mbx_handler(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev = ae_dev->priv;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector =
		hdev->vector_status[hdev->num_msi_used];

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}
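
/* counterpart of hclgevf_ae_start(): disable the rings and stop the
 * periodic service task
 */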
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < hdev->num_tqps; i++) {
		/* Ring disable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return;

	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}
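
/* request and enable the misc (vector 0) interrupt, which delivers mailbox
 * event notifications from the PF
 */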
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client)
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);

	/* un-init nic/unic, if this was not called by roce client */
	if ((client->ops->uninit_instance) &&
	    (client->type != HNAE3_CLIENT_ROCE))
		client->ops->uninit_instance(&hdev->nic, 0);
}
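
/* enable the PCI device and map BAR 2, which holds the VF register space */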
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	/* check if we need to skip the initialization of PCI. This will
	 * happen if the device is undergoing a VF reset. Otherwise, the PCI
	 * interface has to be re-initialized, i.e. when the device is not
	 * going through *any* reset or is actually undergoing a full reset.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
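
/* bring up the hclgevf device: PCI, vectors, command queue and the
 * configuration fetched from the PF
 */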
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	/* check if the device is undergoing a full reset (i.e. PCIe as well) */
	if (hclgevf_dev_ongoing_full_reset(hdev)) {
		dev_warn(&pdev->dev, "device is undergoing a full reset\n");
		hclgevf_uninit_hdev(hdev);
	}

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_irq_init;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize mta type for this VF */
	ret = hclgevf_cfg_func_mta_type(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize MTA type\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_irq_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *free_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*free_tqps = 0;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}
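
/* operations exposed to the hnae3 framework */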
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.update_mta_status = hclgevf_update_mta_status,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.name = HCLGEVF_NAME,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);