// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->num_tqps * 2;
}

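/* Write one ETH_GSTRING_LEN-sized name per TX queue, then one per RX queue,
 * in the same order as the values emitted by hclgevf_tqps_get_stats().
 */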
static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclge_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	/* if this is an ongoing reset then we need to re-allocate the TQPs,
	 * since we cannot assume we would get the same number of TQPs back
	 * from the PF
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, hdev->htqp);

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

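/* Derive the kNIC private info (TC count, rss_size, per-queue pointers)
 * from the queue and TC configuration previously fetched from the PF.
 */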
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	/* if this is an ongoing reset then we need to re-allocate the hnae
	 * queues as well, since the number of TQPs from the PF might have
	 * changed.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, kinfo->tqp);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

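/* The RSS key and indirection table sizes are fixed properties of the
 * hardware, so both are reported as compile-time constants.
 */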
static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
				  u8 *key)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_config_cmd *req;
	int lkup_times = key ? 3 : 1;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int status;

	req = (struct hclgevf_rss_config_cmd *)desc.data;
	lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);

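	/* the hash key is spread across several descriptors: each lookup
	 * reads back HCLGEVF_RSS_HASH_KEY_NUM bytes, and the final chunk
	 * carries whatever remains of HCLGEVF_RSS_KEY_SIZE.
	 */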
	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     true);
		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"failed to get hardware RSS cfg, status = %d\n",
				status);
			return status;
		}

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		if (key)
			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
			       req->hash_key,
			       key_size);
	}

	if (hash) {
		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
			*hash = ETH_RSS_HASH_TOP;
		else
			*hash = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

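/* Build HCLGE_MBX_MAP_RING_TO_VECTOR (or UNMAP) mailbox messages for the PF.
 * One message holds a limited number of ring nodes, so longer chains are
 * split across several messages, each repeating the opcode and vector id.
 */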
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
				    HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
			   HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
			   HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret = %d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_free_vector(hdev, vector);

	return 0;
}

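/* Promiscuous mode is owned by the PF; the VF can only request it via the
 * mailbox.
 */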
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				     bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
{
	u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				   HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
				   NULL, 0, true, &resp_msg, sizeof(u8));

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read mta type fail, ret=%d.\n", ret);
		return ret;
	}

	if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
		dev_err(&hdev->pdev->dev,
			"Read mta type invalid, resp=%d.\n", resp_msg);
		return -EINVAL;
	}

	hdev->mta_mac_sel_type = resp_msg;

	return 0;
}

static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
					     const u8 *addr)
{
	u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
	u16 high_val = addr[1] | (addr[0] << 8);

	return (high_val >> rsh) & 0xfff;
}

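/* Push the multicast table status bitmap to the PF in
 * HCLGEVF_MTA_STATUS_MSG_SIZE-byte chunks. Byte 0 of every message holds
 * the chunk index; bit 7 of that byte flags the final chunk.
 */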
static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
					unsigned long *status)
{
#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
#define HCLGEVF_MTA_STATUS_MSG_BITS \
			(HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
			(HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
	u16 tbl_cnt;
	u16 tbl_idx;
	u8 msg_cnt;
	u8 msg_idx;
	int ret;

	msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
			       HCLGEVF_MTA_STATUS_MSG_BITS);
	tbl_idx = 0;
	msg_idx = 0;
	while (msg_cnt--) {
		u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
		u8 *p = &msg[1];
		u8 msg_ofs;
		u8 msg_bit;

		memset(msg, 0, sizeof(msg));

		/* set index field */
		msg[0] = 0x7F & msg_idx;

		/* set end flag field */
		if (msg_cnt == 0) {
			msg[0] |= 0x80;
			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
		} else {
			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
		}

		/* set status field */
		msg_ofs = 0;
		msg_bit = 0;
		while (tbl_cnt--) {
			if (test_bit(tbl_idx, status))
				p[msg_ofs] |= BIT(msg_bit);

			tbl_idx++;

			msg_bit++;
			if (msg_bit == BITS_PER_BYTE) {
				msg_bit = 0;
				msg_ofs++;
			}
		}

		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
					   HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
					   msg, sizeof(msg), false, NULL, 0);
		if (ret)
			break;

		msg_idx++;
	}

	return ret;
}

static int hclgevf_update_mta_status(struct hnae3_handle *handle)
{
	unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct net_device *netdev = hdev->nic.kinfo.netdev;
	struct netdev_hw_addr *ha;
	u16 tbl_idx;

	/* clear status */
	memset(mta_status, 0, sizeof(mta_status));

	/* update status from mc addr list */
	netdev_for_each_mc_addr(ha, netdev) {
		tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
		set_bit(tbl_idx, mta_status);
	}

	return hclgevf_do_update_mta_status(hdev, mta_status);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

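/* The mailbox message carries the new MAC followed by the old one so the
 * PF can validate the change; the very first configuration uses the ADD
 * subcode, later changes use MODIFY.
 */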
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

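/* RX VLAN tag stripping is configured by the PF on the VF's behalf. */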
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable the vf queue before sending the queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return;

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
			     2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	return client->ops->reset_notify(handle, type);
}

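/* Poll the function-reset status register until hardware reports the reset
 * as complete, giving up after HCLGEVF_RESET_WAIT_CNT polls.
 */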
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS	500
#define HCLGEVF_RESET_WAIT_CNT	20
	u32 val, cnt = 0;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
	while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
		msleep(HCLGEVF_RESET_WAIT_MS);
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
		cnt++;
	}

	/* hardware completion status should be available by this time */
	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "couldn't get reset done status from h/w, timeout!\n");
		return -EBUSY;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by the PF. Yes,
	 * this also means we might end up waiting a bit more even for a VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

	/* re-initialize the hclge device */
	ret = hclgevf_init_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

	return 0;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	int ret;

	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);

		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

		rtnl_unlock();
		return ret;
	}

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

	/* bring up the nic to enable TX/RX again */
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

	rtnl_unlock();

	return ret;
}

static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
	int status;
	u8 respmsg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
				      0, false, &respmsg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF reset request to PF failed(=%d)\n", status);

	return status;
}

static void hclgevf_reset_event(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	handle->reset_level = HNAE3_VF_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	handle->last_reset_time = jiffies;
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

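/* The schedule helpers below use the state bits to ensure each work item
 * is queued at most once until its handler has run.
 */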
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

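/* Handles both reset paths: HCLGEVF_RESET_PENDING (the PF has asserted a
 * hardware reset, so wait for completion and rebuild the stack) and
 * HCLGEVF_RESET_REQUESTED (this VF asked for a reset and keeps nudging the
 * PF until it takes effect).
 */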
static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		ret = hclgevf_reset(hdev);
		if (ret)
			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of the PF for this reset
		 * request. PF will ack the 1b and 2. cases but we will not get
		 * any intimation about 1a from PF as cmdq would be in an
		 * unreliable state i.e. mailbox communication between PF and
		 * VF would be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			/* request PF for resetting this VF via mailbox */
			ret = hclgevf_do_reset(hdev);
			if (ret)
				dev_warn(&hdev->pdev->dev,
					 "VF rst fail, stack will call\n");
		}
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

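/* writing 1/0 to the vector address unmasks/masks the misc interrupt */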
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	hclgevf_mbx_handler(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclge_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev = ae_dev->priv;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector =
		hdev->vector_status[hdev->num_msi_used];

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

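/* Enable every TQP, clear any stale per-queue statistics and kick the
 * service timer so the link status gets refreshed.
 */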
static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < hdev->num_tqps; i++) {
		/* Ring disable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return;

	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

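/* Request the misc (vector 0) IRQ, which services mailbox and reset
 * events, and unmask it.
 */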
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

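/* Called by the hnae3 framework when a client registers against this ae
 * device; the RoCE instance is only initialized once both the nic and
 * RoCE clients are known.
 */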
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client)
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);

	/* un-init nic/unic, if this was not called by roce client */
	if ((client->ops->uninit_instance) &&
	    (client->type != HNAE3_CLIENT_ROCE))
		client->ops->uninit_instance(&hdev->nic, 0);
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	/* check if we need to skip initialization of pci. This will happen if
	 * the device is undergoing VF reset. Otherwise, we would need to
	 * re-initialize the pci interface, i.e. when the device is not going
	 * through *any* reset or is actually undergoing a full reset.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

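/* Bring up the hclgevf device: PCI, vectors, command queue, then the
 * configuration fetched from the PF. This is re-entered during VF reset,
 * in which case the ongoing-reset checks in the helpers skip the stages
 * that survive a reset.
 */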
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	/* check if device is on-going full reset(i.e. pcie as well) */
	if (hclgevf_dev_ongoing_full_reset(hdev)) {
		dev_warn(&pdev->dev, "device is undergoing a full reset\n");
		hclgevf_uninit_hdev(hdev);
	}

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_irq_init;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize mta type for this VF */
	ret = hclgevf_cfg_func_mta_type(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize MTA type\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_irq_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *free_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*free_tqps = 0;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

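/* hnae3 operations exported to the enet layer on behalf of this VF */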
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.update_mta_status = hclgevf_update_mta_status,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);