// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

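/* The PF replies to HCLGE_MBX_GET_QINFO with a 6-byte payload laid out as
 * three u16 values: number of TQPs, maximum RSS size and RX buffer length.
 * The memcpy()s below unpack that layout.
 */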
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0,
				   true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2], resp_data[2];
	u16 qid_in_pf = 0;
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
				   2, true, resp_data, 2);
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
				   true, &resp_msg, sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg;

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
				 i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1
	u8 send_msg;
	u8 resp_msg;

	send_msg = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
			     sizeof(u8), false, &resp_msg, sizeof(u8));
	send_msg = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
			     sizeof(u8), false, &resp_msg, sizeof(u8));
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

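/* The RSS hash key is longer than a single descriptor can carry, so it is
 * written in three chunks: two of HCLGEVF_RSS_HASH_KEY_NUM bytes and a
 * final one carrying the remainder, with the chunk index encoded in
 * hash_config via HCLGEVF_RSS_HASH_KEY_OFFSET_B.
 */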
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

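/* Per-TC RSS mode: the hardware takes the TC size as a log2 value, so
 * rss_size is first rounded up to a power of two; each TC's queue region
 * then starts at offset rss_size * i.
 */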
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8

	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0,
					   &index, sizeof(index),
					   true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

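/* Translate the ethtool RXH_* flags in nfc->data into the device's tuple
 * field bits; SCTP flows additionally set HCLGEVF_V_TAG_BIT.
 */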
static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

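/* Ring-to-vector (un)mapping is requested through the PF mailbox. Several
 * ring-chain nodes are packed into one message; when the message is full,
 * or the end of the chain is reached, it is sent and a fresh message is
 * started with the same type and vector id.
 */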
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
					HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret = %d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

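/* The VF cannot program promiscuous mode itself; it sends a
 * HCLGE_MBX_SET_PROMISC_MODE request and the PF applies the setting.
 * Only broadcast promiscuity (en_bc_pmc) is configurable here.
 */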
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_bc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_bc_pmc ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
{
	return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

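/* The mailbox message carries the new MAC followed by the old one, so the
 * PF can replace the existing entry; is_first selects the ADD subcode
 * instead of MODIFY for the initial address.
 */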
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

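/* VLAN filter requests are forwarded to the PF as a 5-byte message:
 * byte 0 is the is_kill flag, bytes 1-2 the VLAN id and bytes 3-4 the
 * VLAN protocol, matching the memcpy() layout below.
 */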
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before sending queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev,
			"flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

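/* For FLR the wait is on the HNAE3_FLR_DONE bit set by the stack; for all
 * other reset types the HCLGEVF_RST_ING register is polled until the
 * hardware clears its reset-in-progress bits.
 */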
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by PF. Yes,
	 * this also means we might end up waiting a bit more even for VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	/* When VF reset failed, only the higher level reset asserted by PF
	 * can restore it, so re-initialize the command queue to receive
	 * this higher reset event.
	 */
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");
	if (hclgevf_is_reset_pending(hdev))
		hclgevf_reset_task_schedule(hdev);

	return ret;
}

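/* Pick the highest-priority reset pending in @addr and clear any
 * lower-level requests it subsumes, so a single bigger reset replaces
 * several smaller ones.
 */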
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

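/* The *_SCHED state bits below act as guards so that each service task is
 * queued at most once; the worker clears its bit before running the job.
 */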
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of PF for this reset request.
		 * PF will ack cases 1b and 2, but we will not get any
		 * intimation about 1a from PF as cmdq would be in unreliable
		 * state i.e. mailbox communication between PF and VF would be
		 * broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(u8));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in the future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

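/* Vector 0 multiplexes reset and mailbox events. Reset takes priority in
 * hclgevf_check_evt_cause(); the bits to acknowledge are handed back
 * through *clearval and written to the CMDQ source register afterwards.
 */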
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

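/* Misc (vector 0) interrupt handler: the vector is masked while the cause
 * is demuxed, and the cause is cleared and the vector re-enabled only when
 * the event was recognized.
 */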
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	ret = hclgevf_get_pf_media_type(hdev);
	if (ret)
		return ret;

	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

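/* On revision 0x21 and later the VF owns its RSS hash key and input-tuple
 * configuration and programs them here; on revision 0x20 it shares the
 * PF's RSS config, so only the indirection table and TC mode are set up.
 */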
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config (like VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclgevf_reset_tqp(handle, i);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = alive ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
				    0, &msg_data, 1, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
	return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);

	del_timer_sync(&hdev->keep_alive_timer);
	cancel_work_sync(&hdev->keep_alive_task);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

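/* Reverse of hclgevf_init_msi(): release the devm-allocated vector
 * bookkeeping arrays and free the MSI/MSI-X vectors.
 */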
static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

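/* PCI bring-up: enable the device, require 64-bit DMA, claim the regions
 * and map BAR2, which holds the VF's command queue and I/O registers.
 */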
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* The VF should have NIC vectors and RoCE vectors, and the
		 * NIC vectors are queued before the RoCE vectors. The offset
		 * is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

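/* For a full VF reset the MSI/MSI-X setup and the misc IRQ must be torn
 * down and rebuilt; for lighter resets the vectors stay initialized and
 * the HCLGEVF_STATE_IRQ_INITED bit guards against double initialization.
 */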
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd init failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

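/* Full first-time initialization. The ordering matters: the command queue
 * must be live before the VF can query its resources from the PF, and the
 * vector counts from that query drive the MSI/MSI-X allocation that the
 * misc (mailbox/reset) interrupt depends on.
 */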
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* The VF is not allowed to enable unicast/multicast promisc mode.
	 * For revision 0x20, broadcast promisc mode is disabled by default
	 * and the firmware makes sure broadcast packets can be accepted.
	 * For revision 0x21, broadcast promisc mode is enabled by default.
	 */
	ret = hclgevf_set_promisc_mode(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	hdev = ae_dev->priv;
	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}

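/* Worked example with illustrative numbers: for num_tqps = 16, num_tc = 2
 * and rss_size_max = 16, the maximum is min(16, 16 / 2) = 8 combined
 * channels.
 */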
/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->reset_count;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

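/* The register dump is laid out in 4-word lines: each register group is
 * padded with SEPARATOR_VALUE words up to a line boundary. Ring registers
 * repeat per TQP at a 0x200 byte stride and the TQP interrupt registers
 * repeat per vector at a 4 byte stride; e.g. the 14 cmdq registers plus
 * 2 separators fill exactly 4 lines.
 */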
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_num, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetching per-VF register values from VF PCIe register space */
	reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);