1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 15 static struct hnae3_ae_algo ae_algovf; 16 17 static const struct pci_device_id ae_algovf_pci_tbl[] = { 18 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 19 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 20 /* required last entry */ 21 {0, } 22 }; 23 24 static const u8 hclgevf_hash_key[] = { 25 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 26 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 27 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 28 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 29 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 30 }; 31 32 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 33 34 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 35 HCLGEVF_CMDQ_TX_ADDR_H_REG, 36 HCLGEVF_CMDQ_TX_DEPTH_REG, 37 HCLGEVF_CMDQ_TX_TAIL_REG, 38 HCLGEVF_CMDQ_TX_HEAD_REG, 39 HCLGEVF_CMDQ_RX_ADDR_L_REG, 40 HCLGEVF_CMDQ_RX_ADDR_H_REG, 41 HCLGEVF_CMDQ_RX_DEPTH_REG, 42 HCLGEVF_CMDQ_RX_TAIL_REG, 43 HCLGEVF_CMDQ_RX_HEAD_REG, 44 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 45 HCLGEVF_CMDQ_INTR_STS_REG, 46 HCLGEVF_CMDQ_INTR_EN_REG, 47 HCLGEVF_CMDQ_INTR_GEN_REG}; 48 49 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 50 HCLGEVF_RST_ING, 51 HCLGEVF_GRO_EN_REG}; 52 53 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 54 HCLGEVF_RING_RX_ADDR_H_REG, 55 HCLGEVF_RING_RX_BD_NUM_REG, 56 HCLGEVF_RING_RX_BD_LENGTH_REG, 57 HCLGEVF_RING_RX_MERGE_EN_REG, 58 HCLGEVF_RING_RX_TAIL_REG, 59 HCLGEVF_RING_RX_HEAD_REG, 60 HCLGEVF_RING_RX_FBD_NUM_REG, 61 HCLGEVF_RING_RX_OFFSET_REG, 62 HCLGEVF_RING_RX_FBD_OFFSET_REG, 63 HCLGEVF_RING_RX_STASH_REG, 64 HCLGEVF_RING_RX_BD_ERR_REG, 65 HCLGEVF_RING_TX_ADDR_L_REG, 66 HCLGEVF_RING_TX_ADDR_H_REG, 67 HCLGEVF_RING_TX_BD_NUM_REG, 68 HCLGEVF_RING_TX_PRIORITY_REG, 69 HCLGEVF_RING_TX_TC_REG, 70 HCLGEVF_RING_TX_MERGE_EN_REG, 71 HCLGEVF_RING_TX_TAIL_REG, 72 HCLGEVF_RING_TX_HEAD_REG, 73 HCLGEVF_RING_TX_FBD_NUM_REG, 74 HCLGEVF_RING_TX_OFFSET_REG, 75 HCLGEVF_RING_TX_EBD_NUM_REG, 76 HCLGEVF_RING_TX_EBD_OFFSET_REG, 77 HCLGEVF_RING_TX_BD_ERR_REG, 78 HCLGEVF_RING_EN_REG}; 79 80 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 81 HCLGEVF_TQP_INTR_GL0_REG, 82 HCLGEVF_TQP_INTR_GL1_REG, 83 HCLGEVF_TQP_INTR_GL2_REG, 84 HCLGEVF_TQP_INTR_RL_REG}; 85 86 static inline struct hclgevf_dev *hclgevf_ae_get_hdev( 87 struct hnae3_handle *handle) 88 { 89 if (!handle->client) 90 return container_of(handle, struct hclgevf_dev, nic); 91 else if (handle->client->type == HNAE3_CLIENT_ROCE) 92 return container_of(handle, struct hclgevf_dev, roce); 93 else 94 return container_of(handle, struct hclgevf_dev, nic); 95 } 96 97 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 98 { 99 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 100 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 101 struct hclgevf_desc desc; 102 struct hclgevf_tqp *tqp; 103 int status; 104 int i; 105 106 for (i = 0; i < kinfo->num_tqps; i++) { 107 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 108 hclgevf_cmd_setup_basic_desc(&desc, 109 HCLGEVF_OPC_QUERY_RX_STATUS, 110 true); 111 112 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 113 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 114 if (status) { 115 dev_err(&hdev->pdev->dev, 116 "Query tqp stat fail, status = %d,queue = %d\n", 117 status, i); 118 return status; 119 } 120 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 121 le32_to_cpu(desc.data[1]); 122 123 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 124 true); 125 126 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 127 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 128 if (status) { 129 dev_err(&hdev->pdev->dev, 130 "Query tqp stat fail, status = %d,queue = %d\n", 131 status, i); 132 return status; 133 } 134 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 135 le32_to_cpu(desc.data[1]); 136 } 137 138 return 0; 139 } 140 141 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 142 { 143 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 144 struct hclgevf_tqp *tqp; 145 u64 *buff = data; 146 int i; 147 148 for (i = 0; i < kinfo->num_tqps; i++) { 149 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 150 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 151 } 152 for (i = 0; i < kinfo->num_tqps; i++) { 153 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 154 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 155 } 156 157 return buff; 158 } 159 160 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 161 { 162 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 163 164 return kinfo->num_tqps * 2; 165 } 166 167 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 168 { 169 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 170 u8 *buff = data; 171 int i = 0; 172 173 for (i = 0; i < kinfo->num_tqps; i++) { 174 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 175 struct hclgevf_tqp, q); 176 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 177 tqp->index); 178 buff += ETH_GSTRING_LEN; 179 } 180 181 for (i = 0; i < kinfo->num_tqps; i++) { 182 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 183 struct hclgevf_tqp, q); 184 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 185 tqp->index); 186 buff += ETH_GSTRING_LEN; 187 } 188 189 return buff; 190 } 191 192 static void hclgevf_update_stats(struct hnae3_handle *handle, 193 struct net_device_stats *net_stats) 194 { 195 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 196 int status; 197 198 status = hclgevf_tqps_update_stats(handle); 199 if (status) 200 dev_err(&hdev->pdev->dev, 201 "VF update of TQPS stats fail, status = %d.\n", 202 status); 203 } 204 205 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 206 { 207 if (strset == ETH_SS_TEST) 208 return -EOPNOTSUPP; 209 else if (strset == ETH_SS_STATS) 210 return hclgevf_tqps_get_sset_count(handle, strset); 211 212 return 0; 213 } 214 215 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 216 u8 *data) 217 { 218 u8 *p = (char *)data; 219 220 if (strset == ETH_SS_STATS) 221 p = hclgevf_tqps_get_strings(handle, p); 222 } 223 224 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 225 { 226 hclgevf_tqps_get_stats(handle, data); 227 } 228 229 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 230 { 231 u8 resp_msg; 232 int status; 233 234 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 235 true, &resp_msg, sizeof(u8)); 236 if (status) { 237 dev_err(&hdev->pdev->dev, 238 "VF request to get TC info from PF failed %d", 239 status); 240 return status; 241 } 242 243 hdev->hw_tc_map = resp_msg; 244 245 return 0; 246 } 247 248 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 249 { 250 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 251 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 252 int status; 253 254 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 255 true, resp_msg, 256 HCLGEVF_TQPS_RSS_INFO_LEN); 257 if (status) { 258 dev_err(&hdev->pdev->dev, 259 "VF request to get tqp info from PF failed %d", 260 status); 261 return status; 262 } 263 264 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 265 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 266 memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16)); 267 268 return 0; 269 } 270 271 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 272 { 273 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 274 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 275 int ret; 276 277 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0, 278 true, resp_msg, 279 HCLGEVF_TQPS_DEPTH_INFO_LEN); 280 if (ret) { 281 dev_err(&hdev->pdev->dev, 282 "VF request to get tqp depth info from PF failed %d", 283 ret); 284 return ret; 285 } 286 287 memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16)); 288 memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16)); 289 290 return 0; 291 } 292 293 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 294 { 295 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 296 u8 msg_data[2], resp_data[2]; 297 u16 qid_in_pf = 0; 298 int ret; 299 300 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 301 302 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, 303 2, true, resp_data, 2); 304 if (!ret) 305 qid_in_pf = *(u16 *)resp_data; 306 307 return qid_in_pf; 308 } 309 310 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 311 { 312 u8 resp_msg; 313 int ret; 314 315 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0, 316 true, &resp_msg, sizeof(resp_msg)); 317 if (ret) { 318 dev_err(&hdev->pdev->dev, 319 "VF request to get the pf port media type failed %d", 320 ret); 321 return ret; 322 } 323 324 hdev->hw.mac.media_type = resp_msg; 325 326 return 0; 327 } 328 329 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 330 { 331 struct hclgevf_tqp *tqp; 332 int i; 333 334 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 335 sizeof(struct hclgevf_tqp), GFP_KERNEL); 336 if (!hdev->htqp) 337 return -ENOMEM; 338 339 tqp = hdev->htqp; 340 341 for (i = 0; i < hdev->num_tqps; i++) { 342 tqp->dev = &hdev->pdev->dev; 343 tqp->index = i; 344 345 tqp->q.ae_algo = &ae_algovf; 346 tqp->q.buf_size = hdev->rx_buf_len; 347 tqp->q.tx_desc_num = hdev->num_tx_desc; 348 tqp->q.rx_desc_num = hdev->num_rx_desc; 349 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 350 i * HCLGEVF_TQP_REG_SIZE; 351 352 tqp++; 353 } 354 355 return 0; 356 } 357 358 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 359 { 360 struct hnae3_handle *nic = &hdev->nic; 361 struct hnae3_knic_private_info *kinfo; 362 u16 new_tqps = hdev->num_tqps; 363 int i; 364 365 kinfo = &nic->kinfo; 366 kinfo->num_tc = 0; 367 kinfo->num_tx_desc = hdev->num_tx_desc; 368 kinfo->num_rx_desc = hdev->num_rx_desc; 369 kinfo->rx_buf_len = hdev->rx_buf_len; 370 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 371 if (hdev->hw_tc_map & BIT(i)) 372 kinfo->num_tc++; 373 374 kinfo->rss_size 375 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 376 new_tqps = kinfo->rss_size * kinfo->num_tc; 377 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 378 379 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 380 sizeof(struct hnae3_queue *), GFP_KERNEL); 381 if (!kinfo->tqp) 382 return -ENOMEM; 383 384 for (i = 0; i < kinfo->num_tqps; i++) { 385 hdev->htqp[i].q.handle = &hdev->nic; 386 hdev->htqp[i].q.tqp_index = i; 387 kinfo->tqp[i] = &hdev->htqp[i].q; 388 } 389 390 return 0; 391 } 392 393 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 394 { 395 int status; 396 u8 resp_msg; 397 398 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 399 0, false, &resp_msg, sizeof(u8)); 400 if (status) 401 dev_err(&hdev->pdev->dev, 402 "VF failed to fetch link status(%d) from PF", status); 403 } 404 405 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 406 { 407 struct hnae3_handle *rhandle = &hdev->roce; 408 struct hnae3_handle *handle = &hdev->nic; 409 struct hnae3_client *rclient; 410 struct hnae3_client *client; 411 412 client = handle->client; 413 rclient = hdev->roce_client; 414 415 link_state = 416 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 417 418 if (link_state != hdev->hw.mac.link) { 419 client->ops->link_status_change(handle, !!link_state); 420 if (rclient && rclient->ops->link_status_change) 421 rclient->ops->link_status_change(rhandle, !!link_state); 422 hdev->hw.mac.link = link_state; 423 } 424 } 425 426 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 427 { 428 #define HCLGEVF_ADVERTISING 0 429 #define HCLGEVF_SUPPORTED 1 430 u8 send_msg; 431 u8 resp_msg; 432 433 send_msg = HCLGEVF_ADVERTISING; 434 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, 435 sizeof(u8), false, &resp_msg, sizeof(u8)); 436 send_msg = HCLGEVF_SUPPORTED; 437 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, 438 sizeof(u8), false, &resp_msg, sizeof(u8)); 439 } 440 441 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 442 { 443 struct hnae3_handle *nic = &hdev->nic; 444 int ret; 445 446 nic->ae_algo = &ae_algovf; 447 nic->pdev = hdev->pdev; 448 nic->numa_node_mask = hdev->numa_node_mask; 449 nic->flags |= HNAE3_SUPPORT_VF; 450 451 if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { 452 dev_err(&hdev->pdev->dev, "unsupported device type %d\n", 453 hdev->ae_dev->dev_type); 454 return -EINVAL; 455 } 456 457 ret = hclgevf_knic_setup(hdev); 458 if (ret) 459 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 460 ret); 461 return ret; 462 } 463 464 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 465 { 466 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 467 dev_warn(&hdev->pdev->dev, 468 "vector(vector_id %d) has been freed.\n", vector_id); 469 return; 470 } 471 472 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 473 hdev->num_msi_left += 1; 474 hdev->num_msi_used -= 1; 475 } 476 477 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 478 struct hnae3_vector_info *vector_info) 479 { 480 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 481 struct hnae3_vector_info *vector = vector_info; 482 int alloc = 0; 483 int i, j; 484 485 vector_num = min(hdev->num_msi_left, vector_num); 486 487 for (j = 0; j < vector_num; j++) { 488 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 489 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 490 vector->vector = pci_irq_vector(hdev->pdev, i); 491 vector->io_addr = hdev->hw.io_base + 492 HCLGEVF_VECTOR_REG_BASE + 493 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 494 hdev->vector_status[i] = 0; 495 hdev->vector_irq[i] = vector->vector; 496 497 vector++; 498 alloc++; 499 500 break; 501 } 502 } 503 } 504 hdev->num_msi_left -= alloc; 505 hdev->num_msi_used += alloc; 506 507 return alloc; 508 } 509 510 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 511 { 512 int i; 513 514 for (i = 0; i < hdev->num_msi; i++) 515 if (vector == hdev->vector_irq[i]) 516 return i; 517 518 return -EINVAL; 519 } 520 521 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 522 const u8 hfunc, const u8 *key) 523 { 524 struct hclgevf_rss_config_cmd *req; 525 struct hclgevf_desc desc; 526 int key_offset; 527 int key_size; 528 int ret; 529 530 req = (struct hclgevf_rss_config_cmd *)desc.data; 531 532 for (key_offset = 0; key_offset < 3; key_offset++) { 533 hclgevf_cmd_setup_basic_desc(&desc, 534 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 535 false); 536 537 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 538 req->hash_config |= 539 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 540 541 if (key_offset == 2) 542 key_size = 543 HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; 544 else 545 key_size = HCLGEVF_RSS_HASH_KEY_NUM; 546 547 memcpy(req->hash_key, 548 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 549 550 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 551 if (ret) { 552 dev_err(&hdev->pdev->dev, 553 "Configure RSS config fail, status = %d\n", 554 ret); 555 return ret; 556 } 557 } 558 559 return 0; 560 } 561 562 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 563 { 564 return HCLGEVF_RSS_KEY_SIZE; 565 } 566 567 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 568 { 569 return HCLGEVF_RSS_IND_TBL_SIZE; 570 } 571 572 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 573 { 574 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 575 struct hclgevf_rss_indirection_table_cmd *req; 576 struct hclgevf_desc desc; 577 int status; 578 int i, j; 579 580 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 581 582 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 583 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 584 false); 585 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 586 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 587 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 588 req->rss_result[j] = 589 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 590 591 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 592 if (status) { 593 dev_err(&hdev->pdev->dev, 594 "VF failed(=%d) to set RSS indirection table\n", 595 status); 596 return status; 597 } 598 } 599 600 return 0; 601 } 602 603 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 604 { 605 struct hclgevf_rss_tc_mode_cmd *req; 606 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 607 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 608 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 609 struct hclgevf_desc desc; 610 u16 roundup_size; 611 int status; 612 int i; 613 614 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 615 616 roundup_size = roundup_pow_of_two(rss_size); 617 roundup_size = ilog2(roundup_size); 618 619 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 620 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 621 tc_size[i] = roundup_size; 622 tc_offset[i] = rss_size * i; 623 } 624 625 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 626 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 627 hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 628 (tc_valid[i] & 0x1)); 629 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 630 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 631 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 632 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 633 } 634 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 635 if (status) 636 dev_err(&hdev->pdev->dev, 637 "VF failed(=%d) to set rss tc mode\n", status); 638 639 return status; 640 } 641 642 /* for revision 0x20, vf shared the same rss config with pf */ 643 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 644 { 645 #define HCLGEVF_RSS_MBX_RESP_LEN 8 646 647 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 648 u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 649 u16 msg_num, hash_key_index; 650 u8 index; 651 int ret; 652 653 msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 654 HCLGEVF_RSS_MBX_RESP_LEN; 655 for (index = 0; index < msg_num; index++) { 656 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0, 657 &index, sizeof(index), 658 true, resp_msg, 659 HCLGEVF_RSS_MBX_RESP_LEN); 660 if (ret) { 661 dev_err(&hdev->pdev->dev, 662 "VF get rss hash key from PF failed, ret=%d", 663 ret); 664 return ret; 665 } 666 667 hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 668 if (index == msg_num - 1) 669 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 670 &resp_msg[0], 671 HCLGEVF_RSS_KEY_SIZE - hash_key_index); 672 else 673 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 674 &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 675 } 676 677 return 0; 678 } 679 680 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 681 u8 *hfunc) 682 { 683 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 684 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 685 int i, ret; 686 687 if (handle->pdev->revision >= 0x21) { 688 /* Get hash algorithm */ 689 if (hfunc) { 690 switch (rss_cfg->hash_algo) { 691 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 692 *hfunc = ETH_RSS_HASH_TOP; 693 break; 694 case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 695 *hfunc = ETH_RSS_HASH_XOR; 696 break; 697 default: 698 *hfunc = ETH_RSS_HASH_UNKNOWN; 699 break; 700 } 701 } 702 703 /* Get the RSS Key required by the user */ 704 if (key) 705 memcpy(key, rss_cfg->rss_hash_key, 706 HCLGEVF_RSS_KEY_SIZE); 707 } else { 708 if (hfunc) 709 *hfunc = ETH_RSS_HASH_TOP; 710 if (key) { 711 ret = hclgevf_get_rss_hash_key(hdev); 712 if (ret) 713 return ret; 714 memcpy(key, rss_cfg->rss_hash_key, 715 HCLGEVF_RSS_KEY_SIZE); 716 } 717 } 718 719 if (indir) 720 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 721 indir[i] = rss_cfg->rss_indirection_tbl[i]; 722 723 return 0; 724 } 725 726 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 727 const u8 *key, const u8 hfunc) 728 { 729 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 730 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 731 int ret, i; 732 733 if (handle->pdev->revision >= 0x21) { 734 /* Set the RSS Hash Key if specififed by the user */ 735 if (key) { 736 switch (hfunc) { 737 case ETH_RSS_HASH_TOP: 738 rss_cfg->hash_algo = 739 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 740 break; 741 case ETH_RSS_HASH_XOR: 742 rss_cfg->hash_algo = 743 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 744 break; 745 case ETH_RSS_HASH_NO_CHANGE: 746 break; 747 default: 748 return -EINVAL; 749 } 750 751 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 752 key); 753 if (ret) 754 return ret; 755 756 /* Update the shadow RSS key with user specified qids */ 757 memcpy(rss_cfg->rss_hash_key, key, 758 HCLGEVF_RSS_KEY_SIZE); 759 } 760 } 761 762 /* update the shadow RSS table with user specified qids */ 763 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 764 rss_cfg->rss_indirection_tbl[i] = indir[i]; 765 766 /* update the hardware */ 767 return hclgevf_set_rss_indir_table(hdev); 768 } 769 770 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 771 { 772 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 773 774 if (nfc->data & RXH_L4_B_2_3) 775 hash_sets |= HCLGEVF_D_PORT_BIT; 776 else 777 hash_sets &= ~HCLGEVF_D_PORT_BIT; 778 779 if (nfc->data & RXH_IP_SRC) 780 hash_sets |= HCLGEVF_S_IP_BIT; 781 else 782 hash_sets &= ~HCLGEVF_S_IP_BIT; 783 784 if (nfc->data & RXH_IP_DST) 785 hash_sets |= HCLGEVF_D_IP_BIT; 786 else 787 hash_sets &= ~HCLGEVF_D_IP_BIT; 788 789 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 790 hash_sets |= HCLGEVF_V_TAG_BIT; 791 792 return hash_sets; 793 } 794 795 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 796 struct ethtool_rxnfc *nfc) 797 { 798 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 799 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 800 struct hclgevf_rss_input_tuple_cmd *req; 801 struct hclgevf_desc desc; 802 u8 tuple_sets; 803 int ret; 804 805 if (handle->pdev->revision == 0x20) 806 return -EOPNOTSUPP; 807 808 if (nfc->data & 809 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 810 return -EINVAL; 811 812 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 813 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 814 815 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 816 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 817 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 818 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 819 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 820 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 821 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 822 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 823 824 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 825 switch (nfc->flow_type) { 826 case TCP_V4_FLOW: 827 req->ipv4_tcp_en = tuple_sets; 828 break; 829 case TCP_V6_FLOW: 830 req->ipv6_tcp_en = tuple_sets; 831 break; 832 case UDP_V4_FLOW: 833 req->ipv4_udp_en = tuple_sets; 834 break; 835 case UDP_V6_FLOW: 836 req->ipv6_udp_en = tuple_sets; 837 break; 838 case SCTP_V4_FLOW: 839 req->ipv4_sctp_en = tuple_sets; 840 break; 841 case SCTP_V6_FLOW: 842 if ((nfc->data & RXH_L4_B_0_1) || 843 (nfc->data & RXH_L4_B_2_3)) 844 return -EINVAL; 845 846 req->ipv6_sctp_en = tuple_sets; 847 break; 848 case IPV4_FLOW: 849 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 850 break; 851 case IPV6_FLOW: 852 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 853 break; 854 default: 855 return -EINVAL; 856 } 857 858 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 859 if (ret) { 860 dev_err(&hdev->pdev->dev, 861 "Set rss tuple fail, status = %d\n", ret); 862 return ret; 863 } 864 865 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 866 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 867 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 868 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 869 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 870 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 871 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 872 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 873 return 0; 874 } 875 876 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 877 struct ethtool_rxnfc *nfc) 878 { 879 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 880 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 881 u8 tuple_sets; 882 883 if (handle->pdev->revision == 0x20) 884 return -EOPNOTSUPP; 885 886 nfc->data = 0; 887 888 switch (nfc->flow_type) { 889 case TCP_V4_FLOW: 890 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 891 break; 892 case UDP_V4_FLOW: 893 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 894 break; 895 case TCP_V6_FLOW: 896 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 897 break; 898 case UDP_V6_FLOW: 899 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 900 break; 901 case SCTP_V4_FLOW: 902 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 903 break; 904 case SCTP_V6_FLOW: 905 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 906 break; 907 case IPV4_FLOW: 908 case IPV6_FLOW: 909 tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 910 break; 911 default: 912 return -EINVAL; 913 } 914 915 if (!tuple_sets) 916 return 0; 917 918 if (tuple_sets & HCLGEVF_D_PORT_BIT) 919 nfc->data |= RXH_L4_B_2_3; 920 if (tuple_sets & HCLGEVF_S_PORT_BIT) 921 nfc->data |= RXH_L4_B_0_1; 922 if (tuple_sets & HCLGEVF_D_IP_BIT) 923 nfc->data |= RXH_IP_DST; 924 if (tuple_sets & HCLGEVF_S_IP_BIT) 925 nfc->data |= RXH_IP_SRC; 926 927 return 0; 928 } 929 930 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 931 struct hclgevf_rss_cfg *rss_cfg) 932 { 933 struct hclgevf_rss_input_tuple_cmd *req; 934 struct hclgevf_desc desc; 935 int ret; 936 937 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 938 939 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 940 941 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 942 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 943 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 944 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 945 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 946 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 947 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 948 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 949 950 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 951 if (ret) 952 dev_err(&hdev->pdev->dev, 953 "Configure rss input fail, status = %d\n", ret); 954 return ret; 955 } 956 957 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 958 { 959 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 960 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 961 962 return rss_cfg->rss_size; 963 } 964 965 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 966 int vector_id, 967 struct hnae3_ring_chain_node *ring_chain) 968 { 969 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 970 struct hnae3_ring_chain_node *node; 971 struct hclge_mbx_vf_to_pf_cmd *req; 972 struct hclgevf_desc desc; 973 int i = 0; 974 int status; 975 u8 type; 976 977 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 978 979 for (node = ring_chain; node; node = node->next) { 980 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 981 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 982 983 if (i == 0) { 984 hclgevf_cmd_setup_basic_desc(&desc, 985 HCLGEVF_OPC_MBX_VF_TO_PF, 986 false); 987 type = en ? 988 HCLGE_MBX_MAP_RING_TO_VECTOR : 989 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 990 req->msg[0] = type; 991 req->msg[1] = vector_id; 992 } 993 994 req->msg[idx_offset] = 995 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 996 req->msg[idx_offset + 1] = node->tqp_index; 997 req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, 998 HNAE3_RING_GL_IDX_M, 999 HNAE3_RING_GL_IDX_S); 1000 1001 i++; 1002 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 1003 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 1004 HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 1005 !node->next) { 1006 req->msg[2] = i; 1007 1008 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1009 if (status) { 1010 dev_err(&hdev->pdev->dev, 1011 "Map TQP fail, status is %d.\n", 1012 status); 1013 return status; 1014 } 1015 i = 0; 1016 hclgevf_cmd_setup_basic_desc(&desc, 1017 HCLGEVF_OPC_MBX_VF_TO_PF, 1018 false); 1019 req->msg[0] = type; 1020 req->msg[1] = vector_id; 1021 } 1022 } 1023 1024 return 0; 1025 } 1026 1027 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1028 struct hnae3_ring_chain_node *ring_chain) 1029 { 1030 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1031 int vector_id; 1032 1033 vector_id = hclgevf_get_vector_index(hdev, vector); 1034 if (vector_id < 0) { 1035 dev_err(&handle->pdev->dev, 1036 "Get vector index fail. ret =%d\n", vector_id); 1037 return vector_id; 1038 } 1039 1040 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1041 } 1042 1043 static int hclgevf_unmap_ring_from_vector( 1044 struct hnae3_handle *handle, 1045 int vector, 1046 struct hnae3_ring_chain_node *ring_chain) 1047 { 1048 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1049 int ret, vector_id; 1050 1051 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1052 return 0; 1053 1054 vector_id = hclgevf_get_vector_index(hdev, vector); 1055 if (vector_id < 0) { 1056 dev_err(&handle->pdev->dev, 1057 "Get vector index fail. ret =%d\n", vector_id); 1058 return vector_id; 1059 } 1060 1061 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 1062 if (ret) 1063 dev_err(&handle->pdev->dev, 1064 "Unmap ring from vector fail. vector=%d, ret =%d\n", 1065 vector_id, 1066 ret); 1067 1068 return ret; 1069 } 1070 1071 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 1072 { 1073 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1074 int vector_id; 1075 1076 vector_id = hclgevf_get_vector_index(hdev, vector); 1077 if (vector_id < 0) { 1078 dev_err(&handle->pdev->dev, 1079 "hclgevf_put_vector get vector index fail. ret =%d\n", 1080 vector_id); 1081 return vector_id; 1082 } 1083 1084 hclgevf_free_vector(hdev, vector_id); 1085 1086 return 0; 1087 } 1088 1089 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1090 bool en_bc_pmc) 1091 { 1092 struct hclge_mbx_vf_to_pf_cmd *req; 1093 struct hclgevf_desc desc; 1094 int ret; 1095 1096 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 1097 1098 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); 1099 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; 1100 req->msg[1] = en_bc_pmc ? 1 : 0; 1101 1102 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1103 if (ret) 1104 dev_err(&hdev->pdev->dev, 1105 "Set promisc mode fail, status is %d.\n", ret); 1106 1107 return ret; 1108 } 1109 1110 static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc) 1111 { 1112 return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc); 1113 } 1114 1115 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, 1116 int stream_id, bool enable) 1117 { 1118 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1119 struct hclgevf_desc desc; 1120 int status; 1121 1122 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1123 1124 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1125 false); 1126 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1127 req->stream_id = cpu_to_le16(stream_id); 1128 req->enable |= enable << HCLGEVF_TQP_ENABLE_B; 1129 1130 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1131 if (status) 1132 dev_err(&hdev->pdev->dev, 1133 "TQP enable fail, status =%d.\n", status); 1134 1135 return status; 1136 } 1137 1138 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1139 { 1140 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1141 struct hclgevf_tqp *tqp; 1142 int i; 1143 1144 for (i = 0; i < kinfo->num_tqps; i++) { 1145 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1146 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1147 } 1148 } 1149 1150 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1151 { 1152 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1153 1154 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1155 } 1156 1157 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1158 bool is_first) 1159 { 1160 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1161 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1162 u8 *new_mac_addr = (u8 *)p; 1163 u8 msg_data[ETH_ALEN * 2]; 1164 u16 subcode; 1165 int status; 1166 1167 ether_addr_copy(msg_data, new_mac_addr); 1168 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); 1169 1170 subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD : 1171 HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1172 1173 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1174 subcode, msg_data, ETH_ALEN * 2, 1175 true, NULL, 0); 1176 if (!status) 1177 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1178 1179 return status; 1180 } 1181 1182 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1183 const unsigned char *addr) 1184 { 1185 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1186 1187 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1188 HCLGE_MBX_MAC_VLAN_UC_ADD, 1189 addr, ETH_ALEN, false, NULL, 0); 1190 } 1191 1192 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1193 const unsigned char *addr) 1194 { 1195 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1196 1197 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1198 HCLGE_MBX_MAC_VLAN_UC_REMOVE, 1199 addr, ETH_ALEN, false, NULL, 0); 1200 } 1201 1202 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1203 const unsigned char *addr) 1204 { 1205 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1206 1207 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1208 HCLGE_MBX_MAC_VLAN_MC_ADD, 1209 addr, ETH_ALEN, false, NULL, 0); 1210 } 1211 1212 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1213 const unsigned char *addr) 1214 { 1215 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1216 1217 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1218 HCLGE_MBX_MAC_VLAN_MC_REMOVE, 1219 addr, ETH_ALEN, false, NULL, 0); 1220 } 1221 1222 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1223 __be16 proto, u16 vlan_id, 1224 bool is_kill) 1225 { 1226 #define HCLGEVF_VLAN_MBX_MSG_LEN 5 1227 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1228 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; 1229 1230 if (vlan_id > 4095) 1231 return -EINVAL; 1232 1233 if (proto != htons(ETH_P_8021Q)) 1234 return -EPROTONOSUPPORT; 1235 1236 msg_data[0] = is_kill; 1237 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 1238 memcpy(&msg_data[3], &proto, sizeof(proto)); 1239 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1240 HCLGE_MBX_VLAN_FILTER, msg_data, 1241 HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); 1242 } 1243 1244 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1245 { 1246 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1247 u8 msg_data; 1248 1249 msg_data = enable ? 1 : 0; 1250 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1251 HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, 1252 1, false, NULL, 0); 1253 } 1254 1255 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1256 { 1257 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1258 u8 msg_data[2]; 1259 int ret; 1260 1261 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 1262 1263 /* disable vf queue before send queue reset msg to PF */ 1264 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 1265 if (ret) 1266 return ret; 1267 1268 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 1269 2, true, NULL, 0); 1270 } 1271 1272 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1273 { 1274 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1275 1276 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu, 1277 sizeof(new_mtu), true, NULL, 0); 1278 } 1279 1280 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1281 enum hnae3_reset_notify_type type) 1282 { 1283 struct hnae3_client *client = hdev->nic_client; 1284 struct hnae3_handle *handle = &hdev->nic; 1285 int ret; 1286 1287 if (!client->ops->reset_notify) 1288 return -EOPNOTSUPP; 1289 1290 ret = client->ops->reset_notify(handle, type); 1291 if (ret) 1292 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1293 type, ret); 1294 1295 return ret; 1296 } 1297 1298 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 1299 { 1300 struct hclgevf_dev *hdev = ae_dev->priv; 1301 1302 set_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1303 } 1304 1305 static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev, 1306 unsigned long delay_us, 1307 unsigned long wait_cnt) 1308 { 1309 unsigned long cnt = 0; 1310 1311 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && 1312 cnt++ < wait_cnt) 1313 usleep_range(delay_us, delay_us * 2); 1314 1315 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { 1316 dev_err(&hdev->pdev->dev, 1317 "flr wait timeout\n"); 1318 return -ETIMEDOUT; 1319 } 1320 1321 return 0; 1322 } 1323 1324 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1325 { 1326 #define HCLGEVF_RESET_WAIT_US 20000 1327 #define HCLGEVF_RESET_WAIT_CNT 2000 1328 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1329 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1330 1331 u32 val; 1332 int ret; 1333 1334 /* wait to check the hardware reset completion status */ 1335 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1336 dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val); 1337 1338 if (hdev->reset_type == HNAE3_FLR_RESET) 1339 return hclgevf_flr_poll_timeout(hdev, 1340 HCLGEVF_RESET_WAIT_US, 1341 HCLGEVF_RESET_WAIT_CNT); 1342 1343 ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val, 1344 !(val & HCLGEVF_RST_ING_BITS), 1345 HCLGEVF_RESET_WAIT_US, 1346 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1347 1348 /* hardware completion status should be available by this time */ 1349 if (ret) { 1350 dev_err(&hdev->pdev->dev, 1351 "could'nt get reset done status from h/w, timeout!\n"); 1352 return ret; 1353 } 1354 1355 /* we will wait a bit more to let reset of the stack to complete. This 1356 * might happen in case reset assertion was made by PF. Yes, this also 1357 * means we might end up waiting bit more even for VF reset. 1358 */ 1359 msleep(5000); 1360 1361 return 0; 1362 } 1363 1364 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1365 { 1366 int ret; 1367 1368 /* uninitialize the nic client */ 1369 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1370 if (ret) 1371 return ret; 1372 1373 /* re-initialize the hclge device */ 1374 ret = hclgevf_reset_hdev(hdev); 1375 if (ret) { 1376 dev_err(&hdev->pdev->dev, 1377 "hclge device re-init failed, VF is disabled!\n"); 1378 return ret; 1379 } 1380 1381 /* bring up the nic client again */ 1382 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1383 if (ret) 1384 return ret; 1385 1386 return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); 1387 } 1388 1389 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1390 { 1391 int ret = 0; 1392 1393 switch (hdev->reset_type) { 1394 case HNAE3_VF_FUNC_RESET: 1395 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 1396 0, true, NULL, sizeof(u8)); 1397 break; 1398 case HNAE3_FLR_RESET: 1399 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1400 break; 1401 default: 1402 break; 1403 } 1404 1405 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1406 1407 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", 1408 hdev->reset_type, ret); 1409 1410 return ret; 1411 } 1412 1413 static int hclgevf_reset(struct hclgevf_dev *hdev) 1414 { 1415 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1416 int ret; 1417 1418 /* Initialize ae_dev reset status as well, in case enet layer wants to 1419 * know if device is undergoing reset 1420 */ 1421 ae_dev->reset_type = hdev->reset_type; 1422 hdev->reset_count++; 1423 rtnl_lock(); 1424 1425 /* bring down the nic to stop any ongoing TX/RX */ 1426 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1427 if (ret) 1428 goto err_reset_lock; 1429 1430 rtnl_unlock(); 1431 1432 ret = hclgevf_reset_prepare_wait(hdev); 1433 if (ret) 1434 goto err_reset; 1435 1436 /* check if VF could successfully fetch the hardware reset completion 1437 * status from the hardware 1438 */ 1439 ret = hclgevf_reset_wait(hdev); 1440 if (ret) { 1441 /* can't do much in this situation, will disable VF */ 1442 dev_err(&hdev->pdev->dev, 1443 "VF failed(=%d) to fetch H/W reset completion status\n", 1444 ret); 1445 goto err_reset; 1446 } 1447 1448 rtnl_lock(); 1449 1450 /* now, re-initialize the nic client and ae device*/ 1451 ret = hclgevf_reset_stack(hdev); 1452 if (ret) { 1453 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1454 goto err_reset_lock; 1455 } 1456 1457 /* bring up the nic to enable TX/RX again */ 1458 ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1459 if (ret) 1460 goto err_reset_lock; 1461 1462 rtnl_unlock(); 1463 1464 hdev->last_reset_time = jiffies; 1465 ae_dev->reset_type = HNAE3_NONE_RESET; 1466 1467 return ret; 1468 err_reset_lock: 1469 rtnl_unlock(); 1470 err_reset: 1471 /* When VF reset failed, only the higher level reset asserted by PF 1472 * can restore it, so re-initialize the command queue to receive 1473 * this higher reset event. 1474 */ 1475 hclgevf_cmd_init(hdev); 1476 dev_err(&hdev->pdev->dev, "failed to reset VF\n"); 1477 1478 return ret; 1479 } 1480 1481 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1482 unsigned long *addr) 1483 { 1484 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1485 1486 /* return the highest priority reset level amongst all */ 1487 if (test_bit(HNAE3_VF_RESET, addr)) { 1488 rst_level = HNAE3_VF_RESET; 1489 clear_bit(HNAE3_VF_RESET, addr); 1490 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1491 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1492 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1493 rst_level = HNAE3_VF_FULL_RESET; 1494 clear_bit(HNAE3_VF_FULL_RESET, addr); 1495 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1496 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1497 rst_level = HNAE3_VF_PF_FUNC_RESET; 1498 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1499 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1500 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1501 rst_level = HNAE3_VF_FUNC_RESET; 1502 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1503 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 1504 rst_level = HNAE3_FLR_RESET; 1505 clear_bit(HNAE3_FLR_RESET, addr); 1506 } 1507 1508 return rst_level; 1509 } 1510 1511 static void hclgevf_reset_event(struct pci_dev *pdev, 1512 struct hnae3_handle *handle) 1513 { 1514 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1515 struct hclgevf_dev *hdev = ae_dev->priv; 1516 1517 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 1518 1519 if (hdev->default_reset_request) 1520 hdev->reset_level = 1521 hclgevf_get_reset_level(hdev, 1522 &hdev->default_reset_request); 1523 else 1524 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1525 1526 /* reset of this VF requested */ 1527 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1528 hclgevf_reset_task_schedule(hdev); 1529 1530 hdev->last_reset_time = jiffies; 1531 } 1532 1533 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1534 enum hnae3_reset_type rst_type) 1535 { 1536 struct hclgevf_dev *hdev = ae_dev->priv; 1537 1538 set_bit(rst_type, &hdev->default_reset_request); 1539 } 1540 1541 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 1542 { 1543 #define HCLGEVF_FLR_WAIT_MS 100 1544 #define HCLGEVF_FLR_WAIT_CNT 50 1545 struct hclgevf_dev *hdev = ae_dev->priv; 1546 int cnt = 0; 1547 1548 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1549 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1550 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); 1551 hclgevf_reset_event(hdev->pdev, NULL); 1552 1553 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && 1554 cnt++ < HCLGEVF_FLR_WAIT_CNT) 1555 msleep(HCLGEVF_FLR_WAIT_MS); 1556 1557 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) 1558 dev_err(&hdev->pdev->dev, 1559 "flr wait down timeout: %d\n", cnt); 1560 } 1561 1562 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1563 { 1564 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1565 1566 return hdev->fw_version; 1567 } 1568 1569 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1570 { 1571 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1572 1573 vector->vector_irq = pci_irq_vector(hdev->pdev, 1574 HCLGEVF_MISC_VECTOR_NUM); 1575 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1576 /* vector status always valid for Vector 0 */ 1577 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1578 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1579 1580 hdev->num_msi_left -= 1; 1581 hdev->num_msi_used += 1; 1582 } 1583 1584 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1585 { 1586 if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && 1587 !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { 1588 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1589 schedule_work(&hdev->rst_service_task); 1590 } 1591 } 1592 1593 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1594 { 1595 if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && 1596 !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { 1597 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1598 schedule_work(&hdev->mbx_service_task); 1599 } 1600 } 1601 1602 static void hclgevf_task_schedule(struct hclgevf_dev *hdev) 1603 { 1604 if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && 1605 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) 1606 schedule_work(&hdev->service_task); 1607 } 1608 1609 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) 1610 { 1611 /* if we have any pending mailbox event then schedule the mbx task */ 1612 if (hdev->mbx_event_pending) 1613 hclgevf_mbx_task_schedule(hdev); 1614 1615 if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) 1616 hclgevf_reset_task_schedule(hdev); 1617 } 1618 1619 static void hclgevf_service_timer(struct timer_list *t) 1620 { 1621 struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); 1622 1623 mod_timer(&hdev->service_timer, jiffies + 5 * HZ); 1624 1625 hclgevf_task_schedule(hdev); 1626 } 1627 1628 static void hclgevf_reset_service_task(struct work_struct *work) 1629 { 1630 struct hclgevf_dev *hdev = 1631 container_of(work, struct hclgevf_dev, rst_service_task); 1632 int ret; 1633 1634 if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1635 return; 1636 1637 clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1638 1639 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 1640 &hdev->reset_state)) { 1641 /* PF has initmated that it is about to reset the hardware. 1642 * We now have to poll & check if harware has actually completed 1643 * the reset sequence. On hardware reset completion, VF needs to 1644 * reset the client and ae device. 1645 */ 1646 hdev->reset_attempts = 0; 1647 1648 hdev->last_reset_time = jiffies; 1649 while ((hdev->reset_type = 1650 hclgevf_get_reset_level(hdev, &hdev->reset_pending)) 1651 != HNAE3_NONE_RESET) { 1652 ret = hclgevf_reset(hdev); 1653 if (ret) 1654 dev_err(&hdev->pdev->dev, 1655 "VF stack reset failed %d.\n", ret); 1656 } 1657 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1658 &hdev->reset_state)) { 1659 /* we could be here when either of below happens: 1660 * 1. reset was initiated due to watchdog timeout due to 1661 * a. IMP was earlier reset and our TX got choked down and 1662 * which resulted in watchdog reacting and inducing VF 1663 * reset. This also means our cmdq would be unreliable. 1664 * b. problem in TX due to other lower layer(example link 1665 * layer not functioning properly etc.) 1666 * 2. VF reset might have been initiated due to some config 1667 * change. 1668 * 1669 * NOTE: Theres no clear way to detect above cases than to react 1670 * to the response of PF for this reset request. PF will ack the 1671 * 1b and 2. cases but we will not get any intimation about 1a 1672 * from PF as cmdq would be in unreliable state i.e. mailbox 1673 * communication between PF and VF would be broken. 1674 */ 1675 1676 /* if we are never geting into pending state it means either: 1677 * 1. PF is not receiving our request which could be due to IMP 1678 * reset 1679 * 2. PF is screwed 1680 * We cannot do much for 2. but to check first we can try reset 1681 * our PCIe + stack and see if it alleviates the problem. 1682 */ 1683 if (hdev->reset_attempts > 3) { 1684 /* prepare for full reset of stack + pcie interface */ 1685 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 1686 1687 /* "defer" schedule the reset task again */ 1688 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1689 } else { 1690 hdev->reset_attempts++; 1691 1692 set_bit(hdev->reset_level, &hdev->reset_pending); 1693 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1694 } 1695 hclgevf_reset_task_schedule(hdev); 1696 } 1697 1698 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1699 } 1700 1701 static void hclgevf_mailbox_service_task(struct work_struct *work) 1702 { 1703 struct hclgevf_dev *hdev; 1704 1705 hdev = container_of(work, struct hclgevf_dev, mbx_service_task); 1706 1707 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1708 return; 1709 1710 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1711 1712 hclgevf_mbx_async_handler(hdev); 1713 1714 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1715 } 1716 1717 static void hclgevf_keep_alive_timer(struct timer_list *t) 1718 { 1719 struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer); 1720 1721 schedule_work(&hdev->keep_alive_task); 1722 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 1723 } 1724 1725 static void hclgevf_keep_alive_task(struct work_struct *work) 1726 { 1727 struct hclgevf_dev *hdev; 1728 u8 respmsg; 1729 int ret; 1730 1731 hdev = container_of(work, struct hclgevf_dev, keep_alive_task); 1732 1733 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1734 return; 1735 1736 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, 1737 0, false, &respmsg, sizeof(u8)); 1738 if (ret) 1739 dev_err(&hdev->pdev->dev, 1740 "VF sends keep alive cmd failed(=%d)\n", ret); 1741 } 1742 1743 static void hclgevf_service_task(struct work_struct *work) 1744 { 1745 struct hclgevf_dev *hdev; 1746 1747 hdev = container_of(work, struct hclgevf_dev, service_task); 1748 1749 /* request the link status from the PF. PF would be able to tell VF 1750 * about such updates in future so we might remove this later 1751 */ 1752 hclgevf_request_link_info(hdev); 1753 1754 hclgevf_update_link_mode(hdev); 1755 1756 hclgevf_deferred_task_schedule(hdev); 1757 1758 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1759 } 1760 1761 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 1762 { 1763 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 1764 } 1765 1766 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 1767 u32 *clearval) 1768 { 1769 u32 cmdq_src_reg, rst_ing_reg; 1770 1771 /* fetch the events from their corresponding regs */ 1772 cmdq_src_reg = hclgevf_read_dev(&hdev->hw, 1773 HCLGEVF_VECTOR0_CMDQ_SRC_REG); 1774 1775 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) { 1776 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1777 dev_info(&hdev->pdev->dev, 1778 "receive reset interrupt 0x%x!\n", rst_ing_reg); 1779 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 1780 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1781 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1782 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); 1783 *clearval = cmdq_src_reg; 1784 return HCLGEVF_VECTOR0_EVENT_RST; 1785 } 1786 1787 /* check for vector0 mailbox(=CMDQ RX) event source */ 1788 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 1789 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 1790 *clearval = cmdq_src_reg; 1791 return HCLGEVF_VECTOR0_EVENT_MBX; 1792 } 1793 1794 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); 1795 1796 return HCLGEVF_VECTOR0_EVENT_OTHER; 1797 } 1798 1799 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1800 { 1801 writel(en ? 1 : 0, vector->addr); 1802 } 1803 1804 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 1805 { 1806 enum hclgevf_evt_cause event_cause; 1807 struct hclgevf_dev *hdev = data; 1808 u32 clearval; 1809 1810 hclgevf_enable_vector(&hdev->misc_vector, false); 1811 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 1812 1813 switch (event_cause) { 1814 case HCLGEVF_VECTOR0_EVENT_RST: 1815 hclgevf_reset_task_schedule(hdev); 1816 break; 1817 case HCLGEVF_VECTOR0_EVENT_MBX: 1818 hclgevf_mbx_handler(hdev); 1819 break; 1820 default: 1821 break; 1822 } 1823 1824 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 1825 hclgevf_clear_event_cause(hdev, clearval); 1826 hclgevf_enable_vector(&hdev->misc_vector, true); 1827 } 1828 1829 return IRQ_HANDLED; 1830 } 1831 1832 static int hclgevf_configure(struct hclgevf_dev *hdev) 1833 { 1834 int ret; 1835 1836 /* get queue configuration from PF */ 1837 ret = hclgevf_get_queue_info(hdev); 1838 if (ret) 1839 return ret; 1840 1841 /* get queue depth info from PF */ 1842 ret = hclgevf_get_queue_depth(hdev); 1843 if (ret) 1844 return ret; 1845 1846 ret = hclgevf_get_pf_media_type(hdev); 1847 if (ret) 1848 return ret; 1849 1850 /* get tc configuration from PF */ 1851 return hclgevf_get_tc_info(hdev); 1852 } 1853 1854 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 1855 { 1856 struct pci_dev *pdev = ae_dev->pdev; 1857 struct hclgevf_dev *hdev; 1858 1859 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 1860 if (!hdev) 1861 return -ENOMEM; 1862 1863 hdev->pdev = pdev; 1864 hdev->ae_dev = ae_dev; 1865 ae_dev->priv = hdev; 1866 1867 return 0; 1868 } 1869 1870 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 1871 { 1872 struct hnae3_handle *roce = &hdev->roce; 1873 struct hnae3_handle *nic = &hdev->nic; 1874 1875 roce->rinfo.num_vectors = hdev->num_roce_msix; 1876 1877 if (hdev->num_msi_left < roce->rinfo.num_vectors || 1878 hdev->num_msi_left == 0) 1879 return -EINVAL; 1880 1881 roce->rinfo.base_vector = hdev->roce_base_vector; 1882 1883 roce->rinfo.netdev = nic->kinfo.netdev; 1884 roce->rinfo.roce_io_base = hdev->hw.io_base; 1885 1886 roce->pdev = nic->pdev; 1887 roce->ae_algo = nic->ae_algo; 1888 roce->numa_node_mask = nic->numa_node_mask; 1889 1890 return 0; 1891 } 1892 1893 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 1894 { 1895 struct hclgevf_cfg_gro_status_cmd *req; 1896 struct hclgevf_desc desc; 1897 int ret; 1898 1899 if (!hnae3_dev_gro_supported(hdev)) 1900 return 0; 1901 1902 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 1903 false); 1904 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 1905 1906 req->gro_en = cpu_to_le16(en ? 1 : 0); 1907 1908 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1909 if (ret) 1910 dev_err(&hdev->pdev->dev, 1911 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 1912 1913 return ret; 1914 } 1915 1916 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 1917 { 1918 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1919 int i, ret; 1920 1921 rss_cfg->rss_size = hdev->rss_size_max; 1922 1923 if (hdev->pdev->revision >= 0x21) { 1924 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 1925 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 1926 HCLGEVF_RSS_KEY_SIZE); 1927 1928 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 1929 rss_cfg->rss_hash_key); 1930 if (ret) 1931 return ret; 1932 1933 rss_cfg->rss_tuple_sets.ipv4_tcp_en = 1934 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1935 rss_cfg->rss_tuple_sets.ipv4_udp_en = 1936 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1937 rss_cfg->rss_tuple_sets.ipv4_sctp_en = 1938 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1939 rss_cfg->rss_tuple_sets.ipv4_fragment_en = 1940 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1941 rss_cfg->rss_tuple_sets.ipv6_tcp_en = 1942 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1943 rss_cfg->rss_tuple_sets.ipv6_udp_en = 1944 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1945 rss_cfg->rss_tuple_sets.ipv6_sctp_en = 1946 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1947 rss_cfg->rss_tuple_sets.ipv6_fragment_en = 1948 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1949 1950 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 1951 if (ret) 1952 return ret; 1953 1954 } 1955 1956 /* Initialize RSS indirect table for each vport */ 1957 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 1958 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; 1959 1960 ret = hclgevf_set_rss_indir_table(hdev); 1961 if (ret) 1962 return ret; 1963 1964 return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); 1965 } 1966 1967 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 1968 { 1969 /* other vlan config(like, VLAN TX/RX offload) would also be added 1970 * here later 1971 */ 1972 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 1973 false); 1974 } 1975 1976 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 1977 { 1978 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1979 1980 if (enable) { 1981 mod_timer(&hdev->service_timer, jiffies + HZ); 1982 } else { 1983 del_timer_sync(&hdev->service_timer); 1984 cancel_work_sync(&hdev->service_task); 1985 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1986 } 1987 } 1988 1989 static int hclgevf_ae_start(struct hnae3_handle *handle) 1990 { 1991 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1992 1993 /* reset tqp stats */ 1994 hclgevf_reset_tqp_stats(handle); 1995 1996 hclgevf_request_link_info(hdev); 1997 1998 hclgevf_update_link_mode(hdev); 1999 2000 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2001 2002 return 0; 2003 } 2004 2005 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2006 { 2007 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2008 int i; 2009 2010 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2011 2012 for (i = 0; i < handle->kinfo.num_tqps; i++) 2013 hclgevf_reset_tqp(handle, i); 2014 2015 /* reset tqp stats */ 2016 hclgevf_reset_tqp_stats(handle); 2017 hclgevf_update_link_status(hdev, 0); 2018 } 2019 2020 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2021 { 2022 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2023 u8 msg_data; 2024 2025 msg_data = alive ? 1 : 0; 2026 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE, 2027 0, &msg_data, 1, false, NULL, 0); 2028 } 2029 2030 static int hclgevf_client_start(struct hnae3_handle *handle) 2031 { 2032 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2033 2034 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 2035 return hclgevf_set_alive(handle, true); 2036 } 2037 2038 static void hclgevf_client_stop(struct hnae3_handle *handle) 2039 { 2040 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2041 int ret; 2042 2043 ret = hclgevf_set_alive(handle, false); 2044 if (ret) 2045 dev_warn(&hdev->pdev->dev, 2046 "%s failed %d\n", __func__, ret); 2047 2048 del_timer_sync(&hdev->keep_alive_timer); 2049 cancel_work_sync(&hdev->keep_alive_task); 2050 } 2051 2052 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2053 { 2054 /* setup tasks for the MBX */ 2055 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); 2056 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2057 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2058 2059 /* setup tasks for service timer */ 2060 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); 2061 2062 INIT_WORK(&hdev->service_task, hclgevf_service_task); 2063 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 2064 2065 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); 2066 2067 mutex_init(&hdev->mbx_resp.mbx_mutex); 2068 2069 /* bring the device down */ 2070 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2071 } 2072 2073 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2074 { 2075 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2076 2077 if (hdev->service_timer.function) 2078 del_timer_sync(&hdev->service_timer); 2079 if (hdev->service_task.func) 2080 cancel_work_sync(&hdev->service_task); 2081 if (hdev->mbx_service_task.func) 2082 cancel_work_sync(&hdev->mbx_service_task); 2083 if (hdev->rst_service_task.func) 2084 cancel_work_sync(&hdev->rst_service_task); 2085 2086 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2087 } 2088 2089 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2090 { 2091 struct pci_dev *pdev = hdev->pdev; 2092 int vectors; 2093 int i; 2094 2095 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) 2096 vectors = pci_alloc_irq_vectors(pdev, 2097 hdev->roce_base_msix_offset + 1, 2098 hdev->num_msi, 2099 PCI_IRQ_MSIX); 2100 else 2101 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 2102 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2103 2104 if (vectors < 0) { 2105 dev_err(&pdev->dev, 2106 "failed(%d) to allocate MSI/MSI-X vectors\n", 2107 vectors); 2108 return vectors; 2109 } 2110 if (vectors < hdev->num_msi) 2111 dev_warn(&hdev->pdev->dev, 2112 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2113 hdev->num_msi, vectors); 2114 2115 hdev->num_msi = vectors; 2116 hdev->num_msi_left = vectors; 2117 hdev->base_msi_vector = pdev->irq; 2118 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2119 2120 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2121 sizeof(u16), GFP_KERNEL); 2122 if (!hdev->vector_status) { 2123 pci_free_irq_vectors(pdev); 2124 return -ENOMEM; 2125 } 2126 2127 for (i = 0; i < hdev->num_msi; i++) 2128 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2129 2130 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2131 sizeof(int), GFP_KERNEL); 2132 if (!hdev->vector_irq) { 2133 devm_kfree(&pdev->dev, hdev->vector_status); 2134 pci_free_irq_vectors(pdev); 2135 return -ENOMEM; 2136 } 2137 2138 return 0; 2139 } 2140 2141 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2142 { 2143 struct pci_dev *pdev = hdev->pdev; 2144 2145 devm_kfree(&pdev->dev, hdev->vector_status); 2146 devm_kfree(&pdev->dev, hdev->vector_irq); 2147 pci_free_irq_vectors(pdev); 2148 } 2149 2150 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2151 { 2152 int ret = 0; 2153 2154 hclgevf_get_misc_vector(hdev); 2155 2156 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2157 0, "hclgevf_cmd", hdev); 2158 if (ret) { 2159 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2160 hdev->misc_vector.vector_irq); 2161 return ret; 2162 } 2163 2164 hclgevf_clear_event_cause(hdev, 0); 2165 2166 /* enable misc. vector(vector 0) */ 2167 hclgevf_enable_vector(&hdev->misc_vector, true); 2168 2169 return ret; 2170 } 2171 2172 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2173 { 2174 /* disable misc vector(vector 0) */ 2175 hclgevf_enable_vector(&hdev->misc_vector, false); 2176 synchronize_irq(hdev->misc_vector.vector_irq); 2177 free_irq(hdev->misc_vector.vector_irq, hdev); 2178 hclgevf_free_vector(hdev, 0); 2179 } 2180 2181 static int hclgevf_init_client_instance(struct hnae3_client *client, 2182 struct hnae3_ae_dev *ae_dev) 2183 { 2184 struct hclgevf_dev *hdev = ae_dev->priv; 2185 int ret; 2186 2187 switch (client->type) { 2188 case HNAE3_CLIENT_KNIC: 2189 hdev->nic_client = client; 2190 hdev->nic.client = client; 2191 2192 ret = client->ops->init_instance(&hdev->nic); 2193 if (ret) 2194 goto clear_nic; 2195 2196 hnae3_set_client_init_flag(client, ae_dev, 1); 2197 2198 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { 2199 struct hnae3_client *rc = hdev->roce_client; 2200 2201 ret = hclgevf_init_roce_base_info(hdev); 2202 if (ret) 2203 goto clear_roce; 2204 ret = rc->ops->init_instance(&hdev->roce); 2205 if (ret) 2206 goto clear_roce; 2207 2208 hnae3_set_client_init_flag(hdev->roce_client, ae_dev, 2209 1); 2210 } 2211 break; 2212 case HNAE3_CLIENT_UNIC: 2213 hdev->nic_client = client; 2214 hdev->nic.client = client; 2215 2216 ret = client->ops->init_instance(&hdev->nic); 2217 if (ret) 2218 goto clear_nic; 2219 2220 hnae3_set_client_init_flag(client, ae_dev, 1); 2221 break; 2222 case HNAE3_CLIENT_ROCE: 2223 if (hnae3_dev_roce_supported(hdev)) { 2224 hdev->roce_client = client; 2225 hdev->roce.client = client; 2226 } 2227 2228 if (hdev->roce_client && hdev->nic_client) { 2229 ret = hclgevf_init_roce_base_info(hdev); 2230 if (ret) 2231 goto clear_roce; 2232 2233 ret = client->ops->init_instance(&hdev->roce); 2234 if (ret) 2235 goto clear_roce; 2236 } 2237 2238 hnae3_set_client_init_flag(client, ae_dev, 1); 2239 break; 2240 default: 2241 return -EINVAL; 2242 } 2243 2244 return 0; 2245 2246 clear_nic: 2247 hdev->nic_client = NULL; 2248 hdev->nic.client = NULL; 2249 return ret; 2250 clear_roce: 2251 hdev->roce_client = NULL; 2252 hdev->roce.client = NULL; 2253 return ret; 2254 } 2255 2256 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2257 struct hnae3_ae_dev *ae_dev) 2258 { 2259 struct hclgevf_dev *hdev = ae_dev->priv; 2260 2261 /* un-init roce, if it exists */ 2262 if (hdev->roce_client) { 2263 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2264 hdev->roce_client = NULL; 2265 hdev->roce.client = NULL; 2266 } 2267 2268 /* un-init nic/unic, if this was not called by roce client */ 2269 if (client->ops->uninit_instance && hdev->nic_client && 2270 client->type != HNAE3_CLIENT_ROCE) { 2271 client->ops->uninit_instance(&hdev->nic, 0); 2272 hdev->nic_client = NULL; 2273 hdev->nic.client = NULL; 2274 } 2275 } 2276 2277 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2278 { 2279 struct pci_dev *pdev = hdev->pdev; 2280 struct hclgevf_hw *hw; 2281 int ret; 2282 2283 ret = pci_enable_device(pdev); 2284 if (ret) { 2285 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2286 return ret; 2287 } 2288 2289 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2290 if (ret) { 2291 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2292 goto err_disable_device; 2293 } 2294 2295 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2296 if (ret) { 2297 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2298 goto err_disable_device; 2299 } 2300 2301 pci_set_master(pdev); 2302 hw = &hdev->hw; 2303 hw->hdev = hdev; 2304 hw->io_base = pci_iomap(pdev, 2, 0); 2305 if (!hw->io_base) { 2306 dev_err(&pdev->dev, "can't map configuration register space\n"); 2307 ret = -ENOMEM; 2308 goto err_clr_master; 2309 } 2310 2311 return 0; 2312 2313 err_clr_master: 2314 pci_clear_master(pdev); 2315 pci_release_regions(pdev); 2316 err_disable_device: 2317 pci_disable_device(pdev); 2318 2319 return ret; 2320 } 2321 2322 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2323 { 2324 struct pci_dev *pdev = hdev->pdev; 2325 2326 pci_iounmap(pdev, hdev->hw.io_base); 2327 pci_clear_master(pdev); 2328 pci_release_regions(pdev); 2329 pci_disable_device(pdev); 2330 } 2331 2332 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2333 { 2334 struct hclgevf_query_res_cmd *req; 2335 struct hclgevf_desc desc; 2336 int ret; 2337 2338 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 2339 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2340 if (ret) { 2341 dev_err(&hdev->pdev->dev, 2342 "query vf resource failed, ret = %d.\n", ret); 2343 return ret; 2344 } 2345 2346 req = (struct hclgevf_query_res_cmd *)desc.data; 2347 2348 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { 2349 hdev->roce_base_msix_offset = 2350 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), 2351 HCLGEVF_MSIX_OFT_ROCEE_M, 2352 HCLGEVF_MSIX_OFT_ROCEE_S); 2353 hdev->num_roce_msix = 2354 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2355 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2356 2357 /* VF should have NIC vectors and Roce vectors, NIC vectors 2358 * are queued before Roce vectors. The offset is fixed to 64. 2359 */ 2360 hdev->num_msi = hdev->num_roce_msix + 2361 hdev->roce_base_msix_offset; 2362 } else { 2363 hdev->num_msi = 2364 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2365 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2366 } 2367 2368 return 0; 2369 } 2370 2371 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2372 { 2373 struct pci_dev *pdev = hdev->pdev; 2374 int ret = 0; 2375 2376 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2377 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2378 hclgevf_misc_irq_uninit(hdev); 2379 hclgevf_uninit_msi(hdev); 2380 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2381 } 2382 2383 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2384 pci_set_master(pdev); 2385 ret = hclgevf_init_msi(hdev); 2386 if (ret) { 2387 dev_err(&pdev->dev, 2388 "failed(%d) to init MSI/MSI-X\n", ret); 2389 return ret; 2390 } 2391 2392 ret = hclgevf_misc_irq_init(hdev); 2393 if (ret) { 2394 hclgevf_uninit_msi(hdev); 2395 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2396 ret); 2397 return ret; 2398 } 2399 2400 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2401 } 2402 2403 return ret; 2404 } 2405 2406 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2407 { 2408 struct pci_dev *pdev = hdev->pdev; 2409 int ret; 2410 2411 ret = hclgevf_pci_reset(hdev); 2412 if (ret) { 2413 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2414 return ret; 2415 } 2416 2417 ret = hclgevf_cmd_init(hdev); 2418 if (ret) { 2419 dev_err(&pdev->dev, "cmd failed %d\n", ret); 2420 return ret; 2421 } 2422 2423 ret = hclgevf_rss_init_hw(hdev); 2424 if (ret) { 2425 dev_err(&hdev->pdev->dev, 2426 "failed(%d) to initialize RSS\n", ret); 2427 return ret; 2428 } 2429 2430 ret = hclgevf_config_gro(hdev, true); 2431 if (ret) 2432 return ret; 2433 2434 ret = hclgevf_init_vlan_config(hdev); 2435 if (ret) { 2436 dev_err(&hdev->pdev->dev, 2437 "failed(%d) to initialize VLAN config\n", ret); 2438 return ret; 2439 } 2440 2441 dev_info(&hdev->pdev->dev, "Reset done\n"); 2442 2443 return 0; 2444 } 2445 2446 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 2447 { 2448 struct pci_dev *pdev = hdev->pdev; 2449 int ret; 2450 2451 ret = hclgevf_pci_init(hdev); 2452 if (ret) { 2453 dev_err(&pdev->dev, "PCI initialization failed\n"); 2454 return ret; 2455 } 2456 2457 ret = hclgevf_cmd_queue_init(hdev); 2458 if (ret) { 2459 dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret); 2460 goto err_cmd_queue_init; 2461 } 2462 2463 ret = hclgevf_cmd_init(hdev); 2464 if (ret) 2465 goto err_cmd_init; 2466 2467 /* Get vf resource */ 2468 ret = hclgevf_query_vf_resource(hdev); 2469 if (ret) { 2470 dev_err(&hdev->pdev->dev, 2471 "Query vf status error, ret = %d.\n", ret); 2472 goto err_cmd_init; 2473 } 2474 2475 ret = hclgevf_init_msi(hdev); 2476 if (ret) { 2477 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 2478 goto err_cmd_init; 2479 } 2480 2481 hclgevf_state_init(hdev); 2482 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2483 2484 ret = hclgevf_misc_irq_init(hdev); 2485 if (ret) { 2486 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2487 ret); 2488 goto err_misc_irq_init; 2489 } 2490 2491 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2492 2493 ret = hclgevf_configure(hdev); 2494 if (ret) { 2495 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2496 goto err_config; 2497 } 2498 2499 ret = hclgevf_alloc_tqps(hdev); 2500 if (ret) { 2501 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2502 goto err_config; 2503 } 2504 2505 ret = hclgevf_set_handle_info(hdev); 2506 if (ret) { 2507 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); 2508 goto err_config; 2509 } 2510 2511 ret = hclgevf_config_gro(hdev, true); 2512 if (ret) 2513 goto err_config; 2514 2515 /* vf is not allowed to enable unicast/multicast promisc mode. 2516 * For revision 0x20, default to disable broadcast promisc mode, 2517 * firmware makes sure broadcast packets can be accepted. 2518 * For revision 0x21, default to enable broadcast promisc mode. 2519 */ 2520 ret = hclgevf_set_promisc_mode(hdev, true); 2521 if (ret) 2522 goto err_config; 2523 2524 /* Initialize RSS for this VF */ 2525 ret = hclgevf_rss_init_hw(hdev); 2526 if (ret) { 2527 dev_err(&hdev->pdev->dev, 2528 "failed(%d) to initialize RSS\n", ret); 2529 goto err_config; 2530 } 2531 2532 ret = hclgevf_init_vlan_config(hdev); 2533 if (ret) { 2534 dev_err(&hdev->pdev->dev, 2535 "failed(%d) to initialize VLAN config\n", ret); 2536 goto err_config; 2537 } 2538 2539 hdev->last_reset_time = jiffies; 2540 pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); 2541 2542 return 0; 2543 2544 err_config: 2545 hclgevf_misc_irq_uninit(hdev); 2546 err_misc_irq_init: 2547 hclgevf_state_uninit(hdev); 2548 hclgevf_uninit_msi(hdev); 2549 err_cmd_init: 2550 hclgevf_cmd_uninit(hdev); 2551 err_cmd_queue_init: 2552 hclgevf_pci_uninit(hdev); 2553 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2554 return ret; 2555 } 2556 2557 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 2558 { 2559 hclgevf_state_uninit(hdev); 2560 2561 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2562 hclgevf_misc_irq_uninit(hdev); 2563 hclgevf_uninit_msi(hdev); 2564 } 2565 2566 hclgevf_pci_uninit(hdev); 2567 hclgevf_cmd_uninit(hdev); 2568 } 2569 2570 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 2571 { 2572 struct pci_dev *pdev = ae_dev->pdev; 2573 struct hclgevf_dev *hdev; 2574 int ret; 2575 2576 ret = hclgevf_alloc_hdev(ae_dev); 2577 if (ret) { 2578 dev_err(&pdev->dev, "hclge device allocation failed\n"); 2579 return ret; 2580 } 2581 2582 ret = hclgevf_init_hdev(ae_dev->priv); 2583 if (ret) { 2584 dev_err(&pdev->dev, "hclge device initialization failed\n"); 2585 return ret; 2586 } 2587 2588 hdev = ae_dev->priv; 2589 timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0); 2590 INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task); 2591 2592 return 0; 2593 } 2594 2595 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 2596 { 2597 struct hclgevf_dev *hdev = ae_dev->priv; 2598 2599 hclgevf_uninit_hdev(hdev); 2600 ae_dev->priv = NULL; 2601 } 2602 2603 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 2604 { 2605 struct hnae3_handle *nic = &hdev->nic; 2606 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 2607 2608 return min_t(u32, hdev->rss_size_max, 2609 hdev->num_tqps / kinfo->num_tc); 2610 } 2611 2612 /** 2613 * hclgevf_get_channels - Get the current channels enabled and max supported. 2614 * @handle: hardware information for network interface 2615 * @ch: ethtool channels structure 2616 * 2617 * We don't support separate tx and rx queues as channels. The other count 2618 * represents how many queues are being used for control. max_combined counts 2619 * how many queue pairs we can support. They may not be mapped 1 to 1 with 2620 * q_vectors since we support a lot more queue pairs than q_vectors. 2621 **/ 2622 static void hclgevf_get_channels(struct hnae3_handle *handle, 2623 struct ethtool_channels *ch) 2624 { 2625 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2626 2627 ch->max_combined = hclgevf_get_max_channels(hdev); 2628 ch->other_count = 0; 2629 ch->max_other = 0; 2630 ch->combined_count = handle->kinfo.rss_size; 2631 } 2632 2633 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 2634 u16 *alloc_tqps, u16 *max_rss_size) 2635 { 2636 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2637 2638 *alloc_tqps = hdev->num_tqps; 2639 *max_rss_size = hdev->rss_size_max; 2640 } 2641 2642 static int hclgevf_get_status(struct hnae3_handle *handle) 2643 { 2644 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2645 2646 return hdev->hw.mac.link; 2647 } 2648 2649 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 2650 u8 *auto_neg, u32 *speed, 2651 u8 *duplex) 2652 { 2653 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2654 2655 if (speed) 2656 *speed = hdev->hw.mac.speed; 2657 if (duplex) 2658 *duplex = hdev->hw.mac.duplex; 2659 if (auto_neg) 2660 *auto_neg = AUTONEG_DISABLE; 2661 } 2662 2663 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 2664 u8 duplex) 2665 { 2666 hdev->hw.mac.speed = speed; 2667 hdev->hw.mac.duplex = duplex; 2668 } 2669 2670 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 2671 { 2672 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2673 2674 return hclgevf_config_gro(hdev, enable); 2675 } 2676 2677 static void hclgevf_get_media_type(struct hnae3_handle *handle, 2678 u8 *media_type) 2679 { 2680 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2681 if (media_type) 2682 *media_type = hdev->hw.mac.media_type; 2683 } 2684 2685 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 2686 { 2687 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2688 2689 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2690 } 2691 2692 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 2693 { 2694 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2695 2696 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2697 } 2698 2699 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 2700 { 2701 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2702 2703 return hdev->reset_count; 2704 } 2705 2706 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 2707 unsigned long *supported, 2708 unsigned long *advertising) 2709 { 2710 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2711 2712 *supported = hdev->hw.mac.supported; 2713 *advertising = hdev->hw.mac.advertising; 2714 } 2715 2716 #define MAX_SEPARATE_NUM 4 2717 #define SEPARATOR_VALUE 0xFFFFFFFF 2718 #define REG_NUM_PER_LINE 4 2719 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 2720 2721 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 2722 { 2723 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 2724 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2725 2726 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 2727 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 2728 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 2729 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 2730 2731 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 2732 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 2733 } 2734 2735 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 2736 void *data) 2737 { 2738 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2739 int i, j, reg_um, separator_num; 2740 u32 *reg = data; 2741 2742 *version = hdev->fw_version; 2743 2744 /* fetching per-VF registers values from VF PCIe register space */ 2745 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 2746 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2747 for (i = 0; i < reg_um; i++) 2748 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 2749 for (i = 0; i < separator_num; i++) 2750 *reg++ = SEPARATOR_VALUE; 2751 2752 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 2753 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2754 for (i = 0; i < reg_um; i++) 2755 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 2756 for (i = 0; i < separator_num; i++) 2757 *reg++ = SEPARATOR_VALUE; 2758 2759 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 2760 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2761 for (j = 0; j < hdev->num_tqps; j++) { 2762 for (i = 0; i < reg_um; i++) 2763 *reg++ = hclgevf_read_dev(&hdev->hw, 2764 ring_reg_addr_list[i] + 2765 0x200 * j); 2766 for (i = 0; i < separator_num; i++) 2767 *reg++ = SEPARATOR_VALUE; 2768 } 2769 2770 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 2771 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2772 for (j = 0; j < hdev->num_msi_used - 1; j++) { 2773 for (i = 0; i < reg_um; i++) 2774 *reg++ = hclgevf_read_dev(&hdev->hw, 2775 tqp_intr_reg_addr_list[i] + 2776 4 * j); 2777 for (i = 0; i < separator_num; i++) 2778 *reg++ = SEPARATOR_VALUE; 2779 } 2780 } 2781 2782 static const struct hnae3_ae_ops hclgevf_ops = { 2783 .init_ae_dev = hclgevf_init_ae_dev, 2784 .uninit_ae_dev = hclgevf_uninit_ae_dev, 2785 .flr_prepare = hclgevf_flr_prepare, 2786 .flr_done = hclgevf_flr_done, 2787 .init_client_instance = hclgevf_init_client_instance, 2788 .uninit_client_instance = hclgevf_uninit_client_instance, 2789 .start = hclgevf_ae_start, 2790 .stop = hclgevf_ae_stop, 2791 .client_start = hclgevf_client_start, 2792 .client_stop = hclgevf_client_stop, 2793 .map_ring_to_vector = hclgevf_map_ring_to_vector, 2794 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 2795 .get_vector = hclgevf_get_vector, 2796 .put_vector = hclgevf_put_vector, 2797 .reset_queue = hclgevf_reset_tqp, 2798 .get_mac_addr = hclgevf_get_mac_addr, 2799 .set_mac_addr = hclgevf_set_mac_addr, 2800 .add_uc_addr = hclgevf_add_uc_addr, 2801 .rm_uc_addr = hclgevf_rm_uc_addr, 2802 .add_mc_addr = hclgevf_add_mc_addr, 2803 .rm_mc_addr = hclgevf_rm_mc_addr, 2804 .get_stats = hclgevf_get_stats, 2805 .update_stats = hclgevf_update_stats, 2806 .get_strings = hclgevf_get_strings, 2807 .get_sset_count = hclgevf_get_sset_count, 2808 .get_rss_key_size = hclgevf_get_rss_key_size, 2809 .get_rss_indir_size = hclgevf_get_rss_indir_size, 2810 .get_rss = hclgevf_get_rss, 2811 .set_rss = hclgevf_set_rss, 2812 .get_rss_tuple = hclgevf_get_rss_tuple, 2813 .set_rss_tuple = hclgevf_set_rss_tuple, 2814 .get_tc_size = hclgevf_get_tc_size, 2815 .get_fw_version = hclgevf_get_fw_version, 2816 .set_vlan_filter = hclgevf_set_vlan_filter, 2817 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 2818 .reset_event = hclgevf_reset_event, 2819 .set_default_reset_request = hclgevf_set_def_reset_request, 2820 .get_channels = hclgevf_get_channels, 2821 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 2822 .get_regs_len = hclgevf_get_regs_len, 2823 .get_regs = hclgevf_get_regs, 2824 .get_status = hclgevf_get_status, 2825 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 2826 .get_media_type = hclgevf_get_media_type, 2827 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 2828 .ae_dev_resetting = hclgevf_ae_dev_resetting, 2829 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 2830 .set_gro_en = hclgevf_gro_en, 2831 .set_mtu = hclgevf_set_mtu, 2832 .get_global_queue_id = hclgevf_get_qid_global, 2833 .set_timer_task = hclgevf_set_timer_task, 2834 .get_link_mode = hclgevf_get_link_mode, 2835 }; 2836 2837 static struct hnae3_ae_algo ae_algovf = { 2838 .ops = &hclgevf_ops, 2839 .pdev_id_table = ae_algovf_pci_tbl, 2840 }; 2841 2842 static int hclgevf_init(void) 2843 { 2844 pr_info("%s is initializing\n", HCLGEVF_NAME); 2845 2846 hnae3_register_ae_algo(&ae_algovf); 2847 2848 return 0; 2849 } 2850 2851 static void hclgevf_exit(void) 2852 { 2853 hnae3_unregister_ae_algo(&ae_algovf); 2854 } 2855 module_init(hclgevf_init); 2856 module_exit(hclgevf_exit); 2857 2858 MODULE_LICENSE("GPL"); 2859 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 2860 MODULE_DESCRIPTION("HCLGEVF Driver"); 2861 MODULE_VERSION(HCLGEVF_MOD_VERSION); 2862