1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 15 static struct hnae3_ae_algo ae_algovf; 16 17 static const struct pci_device_id ae_algovf_pci_tbl[] = { 18 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 19 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 20 /* required last entry */ 21 {0, } 22 }; 23 24 static const u8 hclgevf_hash_key[] = { 25 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 26 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 27 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 28 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 29 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 30 }; 31 32 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 33 34 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 35 HCLGEVF_CMDQ_TX_ADDR_H_REG, 36 HCLGEVF_CMDQ_TX_DEPTH_REG, 37 HCLGEVF_CMDQ_TX_TAIL_REG, 38 HCLGEVF_CMDQ_TX_HEAD_REG, 39 HCLGEVF_CMDQ_RX_ADDR_L_REG, 40 HCLGEVF_CMDQ_RX_ADDR_H_REG, 41 HCLGEVF_CMDQ_RX_DEPTH_REG, 42 HCLGEVF_CMDQ_RX_TAIL_REG, 43 HCLGEVF_CMDQ_RX_HEAD_REG, 44 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 45 HCLGEVF_CMDQ_INTR_STS_REG, 46 HCLGEVF_CMDQ_INTR_EN_REG, 47 HCLGEVF_CMDQ_INTR_GEN_REG}; 48 49 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 50 HCLGEVF_RST_ING, 51 HCLGEVF_GRO_EN_REG}; 52 53 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 54 HCLGEVF_RING_RX_ADDR_H_REG, 55 HCLGEVF_RING_RX_BD_NUM_REG, 56 HCLGEVF_RING_RX_BD_LENGTH_REG, 57 HCLGEVF_RING_RX_MERGE_EN_REG, 58 HCLGEVF_RING_RX_TAIL_REG, 59 HCLGEVF_RING_RX_HEAD_REG, 60 HCLGEVF_RING_RX_FBD_NUM_REG, 61 HCLGEVF_RING_RX_OFFSET_REG, 62 HCLGEVF_RING_RX_FBD_OFFSET_REG, 63 HCLGEVF_RING_RX_STASH_REG, 64 HCLGEVF_RING_RX_BD_ERR_REG, 65 HCLGEVF_RING_TX_ADDR_L_REG, 66 HCLGEVF_RING_TX_ADDR_H_REG, 67 HCLGEVF_RING_TX_BD_NUM_REG, 68 HCLGEVF_RING_TX_PRIORITY_REG, 69 HCLGEVF_RING_TX_TC_REG, 70 HCLGEVF_RING_TX_MERGE_EN_REG, 71 HCLGEVF_RING_TX_TAIL_REG, 72 HCLGEVF_RING_TX_HEAD_REG, 73 HCLGEVF_RING_TX_FBD_NUM_REG, 74 HCLGEVF_RING_TX_OFFSET_REG, 75 HCLGEVF_RING_TX_EBD_NUM_REG, 76 HCLGEVF_RING_TX_EBD_OFFSET_REG, 77 HCLGEVF_RING_TX_BD_ERR_REG, 78 HCLGEVF_RING_EN_REG}; 79 80 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 81 HCLGEVF_TQP_INTR_GL0_REG, 82 HCLGEVF_TQP_INTR_GL1_REG, 83 HCLGEVF_TQP_INTR_GL2_REG, 84 HCLGEVF_TQP_INTR_RL_REG}; 85 86 static inline struct hclgevf_dev *hclgevf_ae_get_hdev( 87 struct hnae3_handle *handle) 88 { 89 if (!handle->client) 90 return container_of(handle, struct hclgevf_dev, nic); 91 else if (handle->client->type == HNAE3_CLIENT_ROCE) 92 return container_of(handle, struct hclgevf_dev, roce); 93 else 94 return container_of(handle, struct hclgevf_dev, nic); 95 } 96 97 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 98 { 99 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 100 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 101 struct hclgevf_desc desc; 102 struct hclgevf_tqp *tqp; 103 int status; 104 int i; 105 106 for (i = 0; i < kinfo->num_tqps; i++) { 107 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 108 hclgevf_cmd_setup_basic_desc(&desc, 109 HCLGEVF_OPC_QUERY_RX_STATUS, 110 true); 111 112 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 113 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 114 if (status) { 115 dev_err(&hdev->pdev->dev, 116 "Query tqp stat fail, status = %d,queue = %d\n", 117 status, i); 118 return status; 119 } 120 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 121 le32_to_cpu(desc.data[1]); 122 123 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 124 true); 125 126 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 127 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 128 if (status) { 129 dev_err(&hdev->pdev->dev, 130 "Query tqp stat fail, status = %d,queue = %d\n", 131 status, i); 132 return status; 133 } 134 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 135 le32_to_cpu(desc.data[1]); 136 } 137 138 return 0; 139 } 140 141 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 142 { 143 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 144 struct hclgevf_tqp *tqp; 145 u64 *buff = data; 146 int i; 147 148 for (i = 0; i < kinfo->num_tqps; i++) { 149 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 150 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 151 } 152 for (i = 0; i < kinfo->num_tqps; i++) { 153 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 154 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 155 } 156 157 return buff; 158 } 159 160 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 161 { 162 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 163 164 return kinfo->num_tqps * 2; 165 } 166 167 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 168 { 169 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 170 u8 *buff = data; 171 int i = 0; 172 173 for (i = 0; i < kinfo->num_tqps; i++) { 174 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 175 struct hclgevf_tqp, q); 176 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 177 tqp->index); 178 buff += ETH_GSTRING_LEN; 179 } 180 181 for (i = 0; i < kinfo->num_tqps; i++) { 182 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 183 struct hclgevf_tqp, q); 184 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 185 tqp->index); 186 buff += ETH_GSTRING_LEN; 187 } 188 189 return buff; 190 } 191 192 static void hclgevf_update_stats(struct hnae3_handle *handle, 193 struct net_device_stats *net_stats) 194 { 195 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 196 int status; 197 198 status = hclgevf_tqps_update_stats(handle); 199 if (status) 200 dev_err(&hdev->pdev->dev, 201 "VF update of TQPS stats fail, status = %d.\n", 202 status); 203 } 204 205 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 206 { 207 if (strset == ETH_SS_TEST) 208 return -EOPNOTSUPP; 209 else if (strset == ETH_SS_STATS) 210 return hclgevf_tqps_get_sset_count(handle, strset); 211 212 return 0; 213 } 214 215 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 216 u8 *data) 217 { 218 u8 *p = (char *)data; 219 220 if (strset == ETH_SS_STATS) 221 p = hclgevf_tqps_get_strings(handle, p); 222 } 223 224 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 225 { 226 hclgevf_tqps_get_stats(handle, data); 227 } 228 229 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 230 { 231 u8 resp_msg; 232 int status; 233 234 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 235 true, &resp_msg, sizeof(u8)); 236 if (status) { 237 dev_err(&hdev->pdev->dev, 238 "VF request to get TC info from PF failed %d", 239 status); 240 return status; 241 } 242 243 hdev->hw_tc_map = resp_msg; 244 245 return 0; 246 } 247 248 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 249 { 250 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 251 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 252 int status; 253 254 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 255 true, resp_msg, 256 HCLGEVF_TQPS_RSS_INFO_LEN); 257 if (status) { 258 dev_err(&hdev->pdev->dev, 259 "VF request to get tqp info from PF failed %d", 260 status); 261 return status; 262 } 263 264 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 265 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 266 memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16)); 267 268 return 0; 269 } 270 271 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 272 { 273 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 274 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 275 int ret; 276 277 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0, 278 true, resp_msg, 279 HCLGEVF_TQPS_DEPTH_INFO_LEN); 280 if (ret) { 281 dev_err(&hdev->pdev->dev, 282 "VF request to get tqp depth info from PF failed %d", 283 ret); 284 return ret; 285 } 286 287 memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16)); 288 memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16)); 289 290 return 0; 291 } 292 293 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 294 { 295 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 296 u8 msg_data[2], resp_data[2]; 297 u16 qid_in_pf = 0; 298 int ret; 299 300 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 301 302 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, 303 2, true, resp_data, 2); 304 if (!ret) 305 qid_in_pf = *(u16 *)resp_data; 306 307 return qid_in_pf; 308 } 309 310 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 311 { 312 struct hclgevf_tqp *tqp; 313 int i; 314 315 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 316 sizeof(struct hclgevf_tqp), GFP_KERNEL); 317 if (!hdev->htqp) 318 return -ENOMEM; 319 320 tqp = hdev->htqp; 321 322 for (i = 0; i < hdev->num_tqps; i++) { 323 tqp->dev = &hdev->pdev->dev; 324 tqp->index = i; 325 326 tqp->q.ae_algo = &ae_algovf; 327 tqp->q.buf_size = hdev->rx_buf_len; 328 tqp->q.tx_desc_num = hdev->num_tx_desc; 329 tqp->q.rx_desc_num = hdev->num_rx_desc; 330 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 331 i * HCLGEVF_TQP_REG_SIZE; 332 333 tqp++; 334 } 335 336 return 0; 337 } 338 339 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 340 { 341 struct hnae3_handle *nic = &hdev->nic; 342 struct hnae3_knic_private_info *kinfo; 343 u16 new_tqps = hdev->num_tqps; 344 int i; 345 346 kinfo = &nic->kinfo; 347 kinfo->num_tc = 0; 348 kinfo->num_tx_desc = hdev->num_tx_desc; 349 kinfo->num_rx_desc = hdev->num_rx_desc; 350 kinfo->rx_buf_len = hdev->rx_buf_len; 351 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 352 if (hdev->hw_tc_map & BIT(i)) 353 kinfo->num_tc++; 354 355 kinfo->rss_size 356 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 357 new_tqps = kinfo->rss_size * kinfo->num_tc; 358 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 359 360 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 361 sizeof(struct hnae3_queue *), GFP_KERNEL); 362 if (!kinfo->tqp) 363 return -ENOMEM; 364 365 for (i = 0; i < kinfo->num_tqps; i++) { 366 hdev->htqp[i].q.handle = &hdev->nic; 367 hdev->htqp[i].q.tqp_index = i; 368 kinfo->tqp[i] = &hdev->htqp[i].q; 369 } 370 371 return 0; 372 } 373 374 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 375 { 376 int status; 377 u8 resp_msg; 378 379 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 380 0, false, &resp_msg, sizeof(u8)); 381 if (status) 382 dev_err(&hdev->pdev->dev, 383 "VF failed to fetch link status(%d) from PF", status); 384 } 385 386 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 387 { 388 struct hnae3_handle *rhandle = &hdev->roce; 389 struct hnae3_handle *handle = &hdev->nic; 390 struct hnae3_client *rclient; 391 struct hnae3_client *client; 392 393 client = handle->client; 394 rclient = hdev->roce_client; 395 396 link_state = 397 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 398 399 if (link_state != hdev->hw.mac.link) { 400 client->ops->link_status_change(handle, !!link_state); 401 if (rclient && rclient->ops->link_status_change) 402 rclient->ops->link_status_change(rhandle, !!link_state); 403 hdev->hw.mac.link = link_state; 404 } 405 } 406 407 void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 408 { 409 #define HCLGEVF_ADVERTISING 0 410 #define HCLGEVF_SUPPORTED 1 411 u8 send_msg; 412 u8 resp_msg; 413 414 send_msg = HCLGEVF_ADVERTISING; 415 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, 416 sizeof(u8), false, &resp_msg, sizeof(u8)); 417 send_msg = HCLGEVF_SUPPORTED; 418 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, 419 sizeof(u8), false, &resp_msg, sizeof(u8)); 420 } 421 422 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 423 { 424 struct hnae3_handle *nic = &hdev->nic; 425 int ret; 426 427 nic->ae_algo = &ae_algovf; 428 nic->pdev = hdev->pdev; 429 nic->numa_node_mask = hdev->numa_node_mask; 430 nic->flags |= HNAE3_SUPPORT_VF; 431 432 if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { 433 dev_err(&hdev->pdev->dev, "unsupported device type %d\n", 434 hdev->ae_dev->dev_type); 435 return -EINVAL; 436 } 437 438 ret = hclgevf_knic_setup(hdev); 439 if (ret) 440 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 441 ret); 442 return ret; 443 } 444 445 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 446 { 447 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 448 dev_warn(&hdev->pdev->dev, 449 "vector(vector_id %d) has been freed.\n", vector_id); 450 return; 451 } 452 453 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 454 hdev->num_msi_left += 1; 455 hdev->num_msi_used -= 1; 456 } 457 458 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 459 struct hnae3_vector_info *vector_info) 460 { 461 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 462 struct hnae3_vector_info *vector = vector_info; 463 int alloc = 0; 464 int i, j; 465 466 vector_num = min(hdev->num_msi_left, vector_num); 467 468 for (j = 0; j < vector_num; j++) { 469 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 470 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 471 vector->vector = pci_irq_vector(hdev->pdev, i); 472 vector->io_addr = hdev->hw.io_base + 473 HCLGEVF_VECTOR_REG_BASE + 474 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 475 hdev->vector_status[i] = 0; 476 hdev->vector_irq[i] = vector->vector; 477 478 vector++; 479 alloc++; 480 481 break; 482 } 483 } 484 } 485 hdev->num_msi_left -= alloc; 486 hdev->num_msi_used += alloc; 487 488 return alloc; 489 } 490 491 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 492 { 493 int i; 494 495 for (i = 0; i < hdev->num_msi; i++) 496 if (vector == hdev->vector_irq[i]) 497 return i; 498 499 return -EINVAL; 500 } 501 502 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 503 const u8 hfunc, const u8 *key) 504 { 505 struct hclgevf_rss_config_cmd *req; 506 struct hclgevf_desc desc; 507 int key_offset; 508 int key_size; 509 int ret; 510 511 req = (struct hclgevf_rss_config_cmd *)desc.data; 512 513 for (key_offset = 0; key_offset < 3; key_offset++) { 514 hclgevf_cmd_setup_basic_desc(&desc, 515 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 516 false); 517 518 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 519 req->hash_config |= 520 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 521 522 if (key_offset == 2) 523 key_size = 524 HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; 525 else 526 key_size = HCLGEVF_RSS_HASH_KEY_NUM; 527 528 memcpy(req->hash_key, 529 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 530 531 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 532 if (ret) { 533 dev_err(&hdev->pdev->dev, 534 "Configure RSS config fail, status = %d\n", 535 ret); 536 return ret; 537 } 538 } 539 540 return 0; 541 } 542 543 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 544 { 545 return HCLGEVF_RSS_KEY_SIZE; 546 } 547 548 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 549 { 550 return HCLGEVF_RSS_IND_TBL_SIZE; 551 } 552 553 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 554 { 555 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 556 struct hclgevf_rss_indirection_table_cmd *req; 557 struct hclgevf_desc desc; 558 int status; 559 int i, j; 560 561 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 562 563 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 564 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 565 false); 566 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 567 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 568 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 569 req->rss_result[j] = 570 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 571 572 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 573 if (status) { 574 dev_err(&hdev->pdev->dev, 575 "VF failed(=%d) to set RSS indirection table\n", 576 status); 577 return status; 578 } 579 } 580 581 return 0; 582 } 583 584 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 585 { 586 struct hclgevf_rss_tc_mode_cmd *req; 587 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 588 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 589 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 590 struct hclgevf_desc desc; 591 u16 roundup_size; 592 int status; 593 int i; 594 595 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 596 597 roundup_size = roundup_pow_of_two(rss_size); 598 roundup_size = ilog2(roundup_size); 599 600 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 601 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 602 tc_size[i] = roundup_size; 603 tc_offset[i] = rss_size * i; 604 } 605 606 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 607 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 608 hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 609 (tc_valid[i] & 0x1)); 610 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 611 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 612 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 613 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 614 } 615 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 616 if (status) 617 dev_err(&hdev->pdev->dev, 618 "VF failed(=%d) to set rss tc mode\n", status); 619 620 return status; 621 } 622 623 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 624 u8 *hfunc) 625 { 626 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 627 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 628 int i; 629 630 if (handle->pdev->revision >= 0x21) { 631 /* Get hash algorithm */ 632 if (hfunc) { 633 switch (rss_cfg->hash_algo) { 634 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 635 *hfunc = ETH_RSS_HASH_TOP; 636 break; 637 case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 638 *hfunc = ETH_RSS_HASH_XOR; 639 break; 640 default: 641 *hfunc = ETH_RSS_HASH_UNKNOWN; 642 break; 643 } 644 } 645 646 /* Get the RSS Key required by the user */ 647 if (key) 648 memcpy(key, rss_cfg->rss_hash_key, 649 HCLGEVF_RSS_KEY_SIZE); 650 } 651 652 if (indir) 653 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 654 indir[i] = rss_cfg->rss_indirection_tbl[i]; 655 656 return 0; 657 } 658 659 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 660 const u8 *key, const u8 hfunc) 661 { 662 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 663 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 664 int ret, i; 665 666 if (handle->pdev->revision >= 0x21) { 667 /* Set the RSS Hash Key if specififed by the user */ 668 if (key) { 669 switch (hfunc) { 670 case ETH_RSS_HASH_TOP: 671 rss_cfg->hash_algo = 672 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 673 break; 674 case ETH_RSS_HASH_XOR: 675 rss_cfg->hash_algo = 676 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 677 break; 678 case ETH_RSS_HASH_NO_CHANGE: 679 break; 680 default: 681 return -EINVAL; 682 } 683 684 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 685 key); 686 if (ret) 687 return ret; 688 689 /* Update the shadow RSS key with user specified qids */ 690 memcpy(rss_cfg->rss_hash_key, key, 691 HCLGEVF_RSS_KEY_SIZE); 692 } 693 } 694 695 /* update the shadow RSS table with user specified qids */ 696 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 697 rss_cfg->rss_indirection_tbl[i] = indir[i]; 698 699 /* update the hardware */ 700 return hclgevf_set_rss_indir_table(hdev); 701 } 702 703 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 704 { 705 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 706 707 if (nfc->data & RXH_L4_B_2_3) 708 hash_sets |= HCLGEVF_D_PORT_BIT; 709 else 710 hash_sets &= ~HCLGEVF_D_PORT_BIT; 711 712 if (nfc->data & RXH_IP_SRC) 713 hash_sets |= HCLGEVF_S_IP_BIT; 714 else 715 hash_sets &= ~HCLGEVF_S_IP_BIT; 716 717 if (nfc->data & RXH_IP_DST) 718 hash_sets |= HCLGEVF_D_IP_BIT; 719 else 720 hash_sets &= ~HCLGEVF_D_IP_BIT; 721 722 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 723 hash_sets |= HCLGEVF_V_TAG_BIT; 724 725 return hash_sets; 726 } 727 728 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 729 struct ethtool_rxnfc *nfc) 730 { 731 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 732 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 733 struct hclgevf_rss_input_tuple_cmd *req; 734 struct hclgevf_desc desc; 735 u8 tuple_sets; 736 int ret; 737 738 if (handle->pdev->revision == 0x20) 739 return -EOPNOTSUPP; 740 741 if (nfc->data & 742 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 743 return -EINVAL; 744 745 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 746 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 747 748 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 749 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 750 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 751 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 752 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 753 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 754 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 755 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 756 757 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 758 switch (nfc->flow_type) { 759 case TCP_V4_FLOW: 760 req->ipv4_tcp_en = tuple_sets; 761 break; 762 case TCP_V6_FLOW: 763 req->ipv6_tcp_en = tuple_sets; 764 break; 765 case UDP_V4_FLOW: 766 req->ipv4_udp_en = tuple_sets; 767 break; 768 case UDP_V6_FLOW: 769 req->ipv6_udp_en = tuple_sets; 770 break; 771 case SCTP_V4_FLOW: 772 req->ipv4_sctp_en = tuple_sets; 773 break; 774 case SCTP_V6_FLOW: 775 if ((nfc->data & RXH_L4_B_0_1) || 776 (nfc->data & RXH_L4_B_2_3)) 777 return -EINVAL; 778 779 req->ipv6_sctp_en = tuple_sets; 780 break; 781 case IPV4_FLOW: 782 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 783 break; 784 case IPV6_FLOW: 785 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 786 break; 787 default: 788 return -EINVAL; 789 } 790 791 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 792 if (ret) { 793 dev_err(&hdev->pdev->dev, 794 "Set rss tuple fail, status = %d\n", ret); 795 return ret; 796 } 797 798 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 799 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 800 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 801 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 802 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 803 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 804 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 805 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 806 return 0; 807 } 808 809 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 810 struct ethtool_rxnfc *nfc) 811 { 812 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 813 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 814 u8 tuple_sets; 815 816 if (handle->pdev->revision == 0x20) 817 return -EOPNOTSUPP; 818 819 nfc->data = 0; 820 821 switch (nfc->flow_type) { 822 case TCP_V4_FLOW: 823 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 824 break; 825 case UDP_V4_FLOW: 826 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 827 break; 828 case TCP_V6_FLOW: 829 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 830 break; 831 case UDP_V6_FLOW: 832 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 833 break; 834 case SCTP_V4_FLOW: 835 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 836 break; 837 case SCTP_V6_FLOW: 838 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 839 break; 840 case IPV4_FLOW: 841 case IPV6_FLOW: 842 tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 843 break; 844 default: 845 return -EINVAL; 846 } 847 848 if (!tuple_sets) 849 return 0; 850 851 if (tuple_sets & HCLGEVF_D_PORT_BIT) 852 nfc->data |= RXH_L4_B_2_3; 853 if (tuple_sets & HCLGEVF_S_PORT_BIT) 854 nfc->data |= RXH_L4_B_0_1; 855 if (tuple_sets & HCLGEVF_D_IP_BIT) 856 nfc->data |= RXH_IP_DST; 857 if (tuple_sets & HCLGEVF_S_IP_BIT) 858 nfc->data |= RXH_IP_SRC; 859 860 return 0; 861 } 862 863 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 864 struct hclgevf_rss_cfg *rss_cfg) 865 { 866 struct hclgevf_rss_input_tuple_cmd *req; 867 struct hclgevf_desc desc; 868 int ret; 869 870 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 871 872 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 873 874 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 875 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 876 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 877 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 878 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 879 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 880 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 881 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 882 883 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 884 if (ret) 885 dev_err(&hdev->pdev->dev, 886 "Configure rss input fail, status = %d\n", ret); 887 return ret; 888 } 889 890 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 891 { 892 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 893 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 894 895 return rss_cfg->rss_size; 896 } 897 898 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 899 int vector_id, 900 struct hnae3_ring_chain_node *ring_chain) 901 { 902 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 903 struct hnae3_ring_chain_node *node; 904 struct hclge_mbx_vf_to_pf_cmd *req; 905 struct hclgevf_desc desc; 906 int i = 0; 907 int status; 908 u8 type; 909 910 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 911 912 for (node = ring_chain; node; node = node->next) { 913 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 914 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 915 916 if (i == 0) { 917 hclgevf_cmd_setup_basic_desc(&desc, 918 HCLGEVF_OPC_MBX_VF_TO_PF, 919 false); 920 type = en ? 921 HCLGE_MBX_MAP_RING_TO_VECTOR : 922 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 923 req->msg[0] = type; 924 req->msg[1] = vector_id; 925 } 926 927 req->msg[idx_offset] = 928 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 929 req->msg[idx_offset + 1] = node->tqp_index; 930 req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, 931 HNAE3_RING_GL_IDX_M, 932 HNAE3_RING_GL_IDX_S); 933 934 i++; 935 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 936 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 937 HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 938 !node->next) { 939 req->msg[2] = i; 940 941 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 942 if (status) { 943 dev_err(&hdev->pdev->dev, 944 "Map TQP fail, status is %d.\n", 945 status); 946 return status; 947 } 948 i = 0; 949 hclgevf_cmd_setup_basic_desc(&desc, 950 HCLGEVF_OPC_MBX_VF_TO_PF, 951 false); 952 req->msg[0] = type; 953 req->msg[1] = vector_id; 954 } 955 } 956 957 return 0; 958 } 959 960 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 961 struct hnae3_ring_chain_node *ring_chain) 962 { 963 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 964 int vector_id; 965 966 vector_id = hclgevf_get_vector_index(hdev, vector); 967 if (vector_id < 0) { 968 dev_err(&handle->pdev->dev, 969 "Get vector index fail. ret =%d\n", vector_id); 970 return vector_id; 971 } 972 973 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 974 } 975 976 static int hclgevf_unmap_ring_from_vector( 977 struct hnae3_handle *handle, 978 int vector, 979 struct hnae3_ring_chain_node *ring_chain) 980 { 981 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 982 int ret, vector_id; 983 984 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 985 return 0; 986 987 vector_id = hclgevf_get_vector_index(hdev, vector); 988 if (vector_id < 0) { 989 dev_err(&handle->pdev->dev, 990 "Get vector index fail. ret =%d\n", vector_id); 991 return vector_id; 992 } 993 994 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 995 if (ret) 996 dev_err(&handle->pdev->dev, 997 "Unmap ring from vector fail. vector=%d, ret =%d\n", 998 vector_id, 999 ret); 1000 1001 return ret; 1002 } 1003 1004 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 1005 { 1006 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1007 int vector_id; 1008 1009 vector_id = hclgevf_get_vector_index(hdev, vector); 1010 if (vector_id < 0) { 1011 dev_err(&handle->pdev->dev, 1012 "hclgevf_put_vector get vector index fail. ret =%d\n", 1013 vector_id); 1014 return vector_id; 1015 } 1016 1017 hclgevf_free_vector(hdev, vector_id); 1018 1019 return 0; 1020 } 1021 1022 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1023 bool en_bc_pmc) 1024 { 1025 struct hclge_mbx_vf_to_pf_cmd *req; 1026 struct hclgevf_desc desc; 1027 int ret; 1028 1029 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 1030 1031 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); 1032 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; 1033 req->msg[1] = en_bc_pmc ? 1 : 0; 1034 1035 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1036 if (ret) 1037 dev_err(&hdev->pdev->dev, 1038 "Set promisc mode fail, status is %d.\n", ret); 1039 1040 return ret; 1041 } 1042 1043 static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc) 1044 { 1045 return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc); 1046 } 1047 1048 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, 1049 int stream_id, bool enable) 1050 { 1051 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1052 struct hclgevf_desc desc; 1053 int status; 1054 1055 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1056 1057 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1058 false); 1059 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1060 req->stream_id = cpu_to_le16(stream_id); 1061 req->enable |= enable << HCLGEVF_TQP_ENABLE_B; 1062 1063 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1064 if (status) 1065 dev_err(&hdev->pdev->dev, 1066 "TQP enable fail, status =%d.\n", status); 1067 1068 return status; 1069 } 1070 1071 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1072 { 1073 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1074 struct hclgevf_tqp *tqp; 1075 int i; 1076 1077 for (i = 0; i < kinfo->num_tqps; i++) { 1078 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1079 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1080 } 1081 } 1082 1083 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1084 { 1085 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1086 1087 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1088 } 1089 1090 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1091 bool is_first) 1092 { 1093 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1094 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1095 u8 *new_mac_addr = (u8 *)p; 1096 u8 msg_data[ETH_ALEN * 2]; 1097 u16 subcode; 1098 int status; 1099 1100 ether_addr_copy(msg_data, new_mac_addr); 1101 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); 1102 1103 subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD : 1104 HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1105 1106 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1107 subcode, msg_data, ETH_ALEN * 2, 1108 true, NULL, 0); 1109 if (!status) 1110 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1111 1112 return status; 1113 } 1114 1115 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1116 const unsigned char *addr) 1117 { 1118 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1119 1120 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1121 HCLGE_MBX_MAC_VLAN_UC_ADD, 1122 addr, ETH_ALEN, false, NULL, 0); 1123 } 1124 1125 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1126 const unsigned char *addr) 1127 { 1128 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1129 1130 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1131 HCLGE_MBX_MAC_VLAN_UC_REMOVE, 1132 addr, ETH_ALEN, false, NULL, 0); 1133 } 1134 1135 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1136 const unsigned char *addr) 1137 { 1138 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1139 1140 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1141 HCLGE_MBX_MAC_VLAN_MC_ADD, 1142 addr, ETH_ALEN, false, NULL, 0); 1143 } 1144 1145 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1146 const unsigned char *addr) 1147 { 1148 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1149 1150 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1151 HCLGE_MBX_MAC_VLAN_MC_REMOVE, 1152 addr, ETH_ALEN, false, NULL, 0); 1153 } 1154 1155 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1156 __be16 proto, u16 vlan_id, 1157 bool is_kill) 1158 { 1159 #define HCLGEVF_VLAN_MBX_MSG_LEN 5 1160 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1161 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; 1162 1163 if (vlan_id > 4095) 1164 return -EINVAL; 1165 1166 if (proto != htons(ETH_P_8021Q)) 1167 return -EPROTONOSUPPORT; 1168 1169 msg_data[0] = is_kill; 1170 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 1171 memcpy(&msg_data[3], &proto, sizeof(proto)); 1172 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1173 HCLGE_MBX_VLAN_FILTER, msg_data, 1174 HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); 1175 } 1176 1177 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1178 { 1179 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1180 u8 msg_data; 1181 1182 msg_data = enable ? 1 : 0; 1183 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1184 HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, 1185 1, false, NULL, 0); 1186 } 1187 1188 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1189 { 1190 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1191 u8 msg_data[2]; 1192 int ret; 1193 1194 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 1195 1196 /* disable vf queue before send queue reset msg to PF */ 1197 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 1198 if (ret) 1199 return ret; 1200 1201 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 1202 2, true, NULL, 0); 1203 } 1204 1205 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1206 { 1207 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1208 1209 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu, 1210 sizeof(new_mtu), true, NULL, 0); 1211 } 1212 1213 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1214 enum hnae3_reset_notify_type type) 1215 { 1216 struct hnae3_client *client = hdev->nic_client; 1217 struct hnae3_handle *handle = &hdev->nic; 1218 int ret; 1219 1220 if (!client->ops->reset_notify) 1221 return -EOPNOTSUPP; 1222 1223 ret = client->ops->reset_notify(handle, type); 1224 if (ret) 1225 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1226 type, ret); 1227 1228 return ret; 1229 } 1230 1231 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 1232 { 1233 struct hclgevf_dev *hdev = ae_dev->priv; 1234 1235 set_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1236 } 1237 1238 static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev, 1239 unsigned long delay_us, 1240 unsigned long wait_cnt) 1241 { 1242 unsigned long cnt = 0; 1243 1244 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && 1245 cnt++ < wait_cnt) 1246 usleep_range(delay_us, delay_us * 2); 1247 1248 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { 1249 dev_err(&hdev->pdev->dev, 1250 "flr wait timeout\n"); 1251 return -ETIMEDOUT; 1252 } 1253 1254 return 0; 1255 } 1256 1257 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1258 { 1259 #define HCLGEVF_RESET_WAIT_US 20000 1260 #define HCLGEVF_RESET_WAIT_CNT 2000 1261 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1262 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1263 1264 u32 val; 1265 int ret; 1266 1267 /* wait to check the hardware reset completion status */ 1268 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1269 dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val); 1270 1271 if (hdev->reset_type == HNAE3_FLR_RESET) 1272 return hclgevf_flr_poll_timeout(hdev, 1273 HCLGEVF_RESET_WAIT_US, 1274 HCLGEVF_RESET_WAIT_CNT); 1275 1276 ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val, 1277 !(val & HCLGEVF_RST_ING_BITS), 1278 HCLGEVF_RESET_WAIT_US, 1279 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1280 1281 /* hardware completion status should be available by this time */ 1282 if (ret) { 1283 dev_err(&hdev->pdev->dev, 1284 "could'nt get reset done status from h/w, timeout!\n"); 1285 return ret; 1286 } 1287 1288 /* we will wait a bit more to let reset of the stack to complete. This 1289 * might happen in case reset assertion was made by PF. Yes, this also 1290 * means we might end up waiting bit more even for VF reset. 1291 */ 1292 msleep(5000); 1293 1294 return 0; 1295 } 1296 1297 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1298 { 1299 int ret; 1300 1301 /* uninitialize the nic client */ 1302 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1303 if (ret) 1304 return ret; 1305 1306 /* re-initialize the hclge device */ 1307 ret = hclgevf_reset_hdev(hdev); 1308 if (ret) { 1309 dev_err(&hdev->pdev->dev, 1310 "hclge device re-init failed, VF is disabled!\n"); 1311 return ret; 1312 } 1313 1314 /* bring up the nic client again */ 1315 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1316 if (ret) 1317 return ret; 1318 1319 return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); 1320 } 1321 1322 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1323 { 1324 int ret = 0; 1325 1326 switch (hdev->reset_type) { 1327 case HNAE3_VF_FUNC_RESET: 1328 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 1329 0, true, NULL, sizeof(u8)); 1330 break; 1331 case HNAE3_FLR_RESET: 1332 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1333 break; 1334 default: 1335 break; 1336 } 1337 1338 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1339 1340 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", 1341 hdev->reset_type, ret); 1342 1343 return ret; 1344 } 1345 1346 static int hclgevf_reset(struct hclgevf_dev *hdev) 1347 { 1348 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1349 int ret; 1350 1351 /* Initialize ae_dev reset status as well, in case enet layer wants to 1352 * know if device is undergoing reset 1353 */ 1354 ae_dev->reset_type = hdev->reset_type; 1355 hdev->reset_count++; 1356 rtnl_lock(); 1357 1358 /* bring down the nic to stop any ongoing TX/RX */ 1359 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1360 if (ret) 1361 goto err_reset_lock; 1362 1363 rtnl_unlock(); 1364 1365 ret = hclgevf_reset_prepare_wait(hdev); 1366 if (ret) 1367 goto err_reset; 1368 1369 /* check if VF could successfully fetch the hardware reset completion 1370 * status from the hardware 1371 */ 1372 ret = hclgevf_reset_wait(hdev); 1373 if (ret) { 1374 /* can't do much in this situation, will disable VF */ 1375 dev_err(&hdev->pdev->dev, 1376 "VF failed(=%d) to fetch H/W reset completion status\n", 1377 ret); 1378 goto err_reset; 1379 } 1380 1381 rtnl_lock(); 1382 1383 /* now, re-initialize the nic client and ae device*/ 1384 ret = hclgevf_reset_stack(hdev); 1385 if (ret) { 1386 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1387 goto err_reset_lock; 1388 } 1389 1390 /* bring up the nic to enable TX/RX again */ 1391 ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1392 if (ret) 1393 goto err_reset_lock; 1394 1395 rtnl_unlock(); 1396 1397 hdev->last_reset_time = jiffies; 1398 ae_dev->reset_type = HNAE3_NONE_RESET; 1399 1400 return ret; 1401 err_reset_lock: 1402 rtnl_unlock(); 1403 err_reset: 1404 /* When VF reset failed, only the higher level reset asserted by PF 1405 * can restore it, so re-initialize the command queue to receive 1406 * this higher reset event. 1407 */ 1408 hclgevf_cmd_init(hdev); 1409 dev_err(&hdev->pdev->dev, "failed to reset VF\n"); 1410 1411 return ret; 1412 } 1413 1414 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1415 unsigned long *addr) 1416 { 1417 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1418 1419 /* return the highest priority reset level amongst all */ 1420 if (test_bit(HNAE3_VF_RESET, addr)) { 1421 rst_level = HNAE3_VF_RESET; 1422 clear_bit(HNAE3_VF_RESET, addr); 1423 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1424 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1425 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1426 rst_level = HNAE3_VF_FULL_RESET; 1427 clear_bit(HNAE3_VF_FULL_RESET, addr); 1428 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1429 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1430 rst_level = HNAE3_VF_PF_FUNC_RESET; 1431 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1432 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1433 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1434 rst_level = HNAE3_VF_FUNC_RESET; 1435 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1436 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 1437 rst_level = HNAE3_FLR_RESET; 1438 clear_bit(HNAE3_FLR_RESET, addr); 1439 } 1440 1441 return rst_level; 1442 } 1443 1444 static void hclgevf_reset_event(struct pci_dev *pdev, 1445 struct hnae3_handle *handle) 1446 { 1447 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1448 struct hclgevf_dev *hdev = ae_dev->priv; 1449 1450 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 1451 1452 if (hdev->default_reset_request) 1453 hdev->reset_level = 1454 hclgevf_get_reset_level(hdev, 1455 &hdev->default_reset_request); 1456 else 1457 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1458 1459 /* reset of this VF requested */ 1460 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1461 hclgevf_reset_task_schedule(hdev); 1462 1463 hdev->last_reset_time = jiffies; 1464 } 1465 1466 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1467 enum hnae3_reset_type rst_type) 1468 { 1469 struct hclgevf_dev *hdev = ae_dev->priv; 1470 1471 set_bit(rst_type, &hdev->default_reset_request); 1472 } 1473 1474 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 1475 { 1476 #define HCLGEVF_FLR_WAIT_MS 100 1477 #define HCLGEVF_FLR_WAIT_CNT 50 1478 struct hclgevf_dev *hdev = ae_dev->priv; 1479 int cnt = 0; 1480 1481 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1482 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1483 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); 1484 hclgevf_reset_event(hdev->pdev, NULL); 1485 1486 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && 1487 cnt++ < HCLGEVF_FLR_WAIT_CNT) 1488 msleep(HCLGEVF_FLR_WAIT_MS); 1489 1490 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) 1491 dev_err(&hdev->pdev->dev, 1492 "flr wait down timeout: %d\n", cnt); 1493 } 1494 1495 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1496 { 1497 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1498 1499 return hdev->fw_version; 1500 } 1501 1502 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1503 { 1504 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1505 1506 vector->vector_irq = pci_irq_vector(hdev->pdev, 1507 HCLGEVF_MISC_VECTOR_NUM); 1508 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1509 /* vector status always valid for Vector 0 */ 1510 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1511 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1512 1513 hdev->num_msi_left -= 1; 1514 hdev->num_msi_used += 1; 1515 } 1516 1517 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1518 { 1519 if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && 1520 !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { 1521 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1522 schedule_work(&hdev->rst_service_task); 1523 } 1524 } 1525 1526 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1527 { 1528 if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && 1529 !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { 1530 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1531 schedule_work(&hdev->mbx_service_task); 1532 } 1533 } 1534 1535 static void hclgevf_task_schedule(struct hclgevf_dev *hdev) 1536 { 1537 if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && 1538 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) 1539 schedule_work(&hdev->service_task); 1540 } 1541 1542 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) 1543 { 1544 /* if we have any pending mailbox event then schedule the mbx task */ 1545 if (hdev->mbx_event_pending) 1546 hclgevf_mbx_task_schedule(hdev); 1547 1548 if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) 1549 hclgevf_reset_task_schedule(hdev); 1550 } 1551 1552 static void hclgevf_service_timer(struct timer_list *t) 1553 { 1554 struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); 1555 1556 mod_timer(&hdev->service_timer, jiffies + 5 * HZ); 1557 1558 hclgevf_task_schedule(hdev); 1559 } 1560 1561 static void hclgevf_reset_service_task(struct work_struct *work) 1562 { 1563 struct hclgevf_dev *hdev = 1564 container_of(work, struct hclgevf_dev, rst_service_task); 1565 int ret; 1566 1567 if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1568 return; 1569 1570 clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1571 1572 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 1573 &hdev->reset_state)) { 1574 /* PF has initmated that it is about to reset the hardware. 1575 * We now have to poll & check if harware has actually completed 1576 * the reset sequence. On hardware reset completion, VF needs to 1577 * reset the client and ae device. 1578 */ 1579 hdev->reset_attempts = 0; 1580 1581 hdev->last_reset_time = jiffies; 1582 while ((hdev->reset_type = 1583 hclgevf_get_reset_level(hdev, &hdev->reset_pending)) 1584 != HNAE3_NONE_RESET) { 1585 ret = hclgevf_reset(hdev); 1586 if (ret) 1587 dev_err(&hdev->pdev->dev, 1588 "VF stack reset failed %d.\n", ret); 1589 } 1590 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1591 &hdev->reset_state)) { 1592 /* we could be here when either of below happens: 1593 * 1. reset was initiated due to watchdog timeout due to 1594 * a. IMP was earlier reset and our TX got choked down and 1595 * which resulted in watchdog reacting and inducing VF 1596 * reset. This also means our cmdq would be unreliable. 1597 * b. problem in TX due to other lower layer(example link 1598 * layer not functioning properly etc.) 1599 * 2. VF reset might have been initiated due to some config 1600 * change. 1601 * 1602 * NOTE: Theres no clear way to detect above cases than to react 1603 * to the response of PF for this reset request. PF will ack the 1604 * 1b and 2. cases but we will not get any intimation about 1a 1605 * from PF as cmdq would be in unreliable state i.e. mailbox 1606 * communication between PF and VF would be broken. 1607 */ 1608 1609 /* if we are never geting into pending state it means either: 1610 * 1. PF is not receiving our request which could be due to IMP 1611 * reset 1612 * 2. PF is screwed 1613 * We cannot do much for 2. but to check first we can try reset 1614 * our PCIe + stack and see if it alleviates the problem. 1615 */ 1616 if (hdev->reset_attempts > 3) { 1617 /* prepare for full reset of stack + pcie interface */ 1618 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 1619 1620 /* "defer" schedule the reset task again */ 1621 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1622 } else { 1623 hdev->reset_attempts++; 1624 1625 set_bit(hdev->reset_level, &hdev->reset_pending); 1626 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1627 } 1628 hclgevf_reset_task_schedule(hdev); 1629 } 1630 1631 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1632 } 1633 1634 static void hclgevf_mailbox_service_task(struct work_struct *work) 1635 { 1636 struct hclgevf_dev *hdev; 1637 1638 hdev = container_of(work, struct hclgevf_dev, mbx_service_task); 1639 1640 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1641 return; 1642 1643 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1644 1645 hclgevf_mbx_async_handler(hdev); 1646 1647 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1648 } 1649 1650 static void hclgevf_keep_alive_timer(struct timer_list *t) 1651 { 1652 struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer); 1653 1654 schedule_work(&hdev->keep_alive_task); 1655 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 1656 } 1657 1658 static void hclgevf_keep_alive_task(struct work_struct *work) 1659 { 1660 struct hclgevf_dev *hdev; 1661 u8 respmsg; 1662 int ret; 1663 1664 hdev = container_of(work, struct hclgevf_dev, keep_alive_task); 1665 1666 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1667 return; 1668 1669 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, 1670 0, false, &respmsg, sizeof(u8)); 1671 if (ret) 1672 dev_err(&hdev->pdev->dev, 1673 "VF sends keep alive cmd failed(=%d)\n", ret); 1674 } 1675 1676 static void hclgevf_service_task(struct work_struct *work) 1677 { 1678 struct hclgevf_dev *hdev; 1679 1680 hdev = container_of(work, struct hclgevf_dev, service_task); 1681 1682 /* request the link status from the PF. PF would be able to tell VF 1683 * about such updates in future so we might remove this later 1684 */ 1685 hclgevf_request_link_info(hdev); 1686 1687 hclgevf_update_link_mode(hdev); 1688 1689 hclgevf_deferred_task_schedule(hdev); 1690 1691 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1692 } 1693 1694 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 1695 { 1696 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 1697 } 1698 1699 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 1700 u32 *clearval) 1701 { 1702 u32 cmdq_src_reg, rst_ing_reg; 1703 1704 /* fetch the events from their corresponding regs */ 1705 cmdq_src_reg = hclgevf_read_dev(&hdev->hw, 1706 HCLGEVF_VECTOR0_CMDQ_SRC_REG); 1707 1708 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) { 1709 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1710 dev_info(&hdev->pdev->dev, 1711 "receive reset interrupt 0x%x!\n", rst_ing_reg); 1712 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 1713 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1714 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1715 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); 1716 *clearval = cmdq_src_reg; 1717 return HCLGEVF_VECTOR0_EVENT_RST; 1718 } 1719 1720 /* check for vector0 mailbox(=CMDQ RX) event source */ 1721 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 1722 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 1723 *clearval = cmdq_src_reg; 1724 return HCLGEVF_VECTOR0_EVENT_MBX; 1725 } 1726 1727 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); 1728 1729 return HCLGEVF_VECTOR0_EVENT_OTHER; 1730 } 1731 1732 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1733 { 1734 writel(en ? 1 : 0, vector->addr); 1735 } 1736 1737 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 1738 { 1739 enum hclgevf_evt_cause event_cause; 1740 struct hclgevf_dev *hdev = data; 1741 u32 clearval; 1742 1743 hclgevf_enable_vector(&hdev->misc_vector, false); 1744 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 1745 1746 switch (event_cause) { 1747 case HCLGEVF_VECTOR0_EVENT_RST: 1748 hclgevf_reset_task_schedule(hdev); 1749 break; 1750 case HCLGEVF_VECTOR0_EVENT_MBX: 1751 hclgevf_mbx_handler(hdev); 1752 break; 1753 default: 1754 break; 1755 } 1756 1757 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 1758 hclgevf_clear_event_cause(hdev, clearval); 1759 hclgevf_enable_vector(&hdev->misc_vector, true); 1760 } 1761 1762 return IRQ_HANDLED; 1763 } 1764 1765 static int hclgevf_configure(struct hclgevf_dev *hdev) 1766 { 1767 int ret; 1768 1769 /* get queue configuration from PF */ 1770 ret = hclgevf_get_queue_info(hdev); 1771 if (ret) 1772 return ret; 1773 1774 /* get queue depth info from PF */ 1775 ret = hclgevf_get_queue_depth(hdev); 1776 if (ret) 1777 return ret; 1778 1779 /* get tc configuration from PF */ 1780 return hclgevf_get_tc_info(hdev); 1781 } 1782 1783 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 1784 { 1785 struct pci_dev *pdev = ae_dev->pdev; 1786 struct hclgevf_dev *hdev; 1787 1788 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 1789 if (!hdev) 1790 return -ENOMEM; 1791 1792 hdev->pdev = pdev; 1793 hdev->ae_dev = ae_dev; 1794 ae_dev->priv = hdev; 1795 1796 return 0; 1797 } 1798 1799 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 1800 { 1801 struct hnae3_handle *roce = &hdev->roce; 1802 struct hnae3_handle *nic = &hdev->nic; 1803 1804 roce->rinfo.num_vectors = hdev->num_roce_msix; 1805 1806 if (hdev->num_msi_left < roce->rinfo.num_vectors || 1807 hdev->num_msi_left == 0) 1808 return -EINVAL; 1809 1810 roce->rinfo.base_vector = hdev->roce_base_vector; 1811 1812 roce->rinfo.netdev = nic->kinfo.netdev; 1813 roce->rinfo.roce_io_base = hdev->hw.io_base; 1814 1815 roce->pdev = nic->pdev; 1816 roce->ae_algo = nic->ae_algo; 1817 roce->numa_node_mask = nic->numa_node_mask; 1818 1819 return 0; 1820 } 1821 1822 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 1823 { 1824 struct hclgevf_cfg_gro_status_cmd *req; 1825 struct hclgevf_desc desc; 1826 int ret; 1827 1828 if (!hnae3_dev_gro_supported(hdev)) 1829 return 0; 1830 1831 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 1832 false); 1833 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 1834 1835 req->gro_en = cpu_to_le16(en ? 1 : 0); 1836 1837 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1838 if (ret) 1839 dev_err(&hdev->pdev->dev, 1840 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 1841 1842 return ret; 1843 } 1844 1845 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 1846 { 1847 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1848 int i, ret; 1849 1850 rss_cfg->rss_size = hdev->rss_size_max; 1851 1852 if (hdev->pdev->revision >= 0x21) { 1853 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 1854 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 1855 HCLGEVF_RSS_KEY_SIZE); 1856 1857 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 1858 rss_cfg->rss_hash_key); 1859 if (ret) 1860 return ret; 1861 1862 rss_cfg->rss_tuple_sets.ipv4_tcp_en = 1863 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1864 rss_cfg->rss_tuple_sets.ipv4_udp_en = 1865 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1866 rss_cfg->rss_tuple_sets.ipv4_sctp_en = 1867 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1868 rss_cfg->rss_tuple_sets.ipv4_fragment_en = 1869 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1870 rss_cfg->rss_tuple_sets.ipv6_tcp_en = 1871 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1872 rss_cfg->rss_tuple_sets.ipv6_udp_en = 1873 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1874 rss_cfg->rss_tuple_sets.ipv6_sctp_en = 1875 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1876 rss_cfg->rss_tuple_sets.ipv6_fragment_en = 1877 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1878 1879 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 1880 if (ret) 1881 return ret; 1882 1883 } 1884 1885 /* Initialize RSS indirect table for each vport */ 1886 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 1887 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; 1888 1889 ret = hclgevf_set_rss_indir_table(hdev); 1890 if (ret) 1891 return ret; 1892 1893 return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); 1894 } 1895 1896 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 1897 { 1898 /* other vlan config(like, VLAN TX/RX offload) would also be added 1899 * here later 1900 */ 1901 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 1902 false); 1903 } 1904 1905 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 1906 { 1907 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1908 1909 if (enable) { 1910 mod_timer(&hdev->service_timer, jiffies + HZ); 1911 } else { 1912 del_timer_sync(&hdev->service_timer); 1913 cancel_work_sync(&hdev->service_task); 1914 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1915 } 1916 } 1917 1918 static int hclgevf_ae_start(struct hnae3_handle *handle) 1919 { 1920 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1921 1922 /* reset tqp stats */ 1923 hclgevf_reset_tqp_stats(handle); 1924 1925 hclgevf_request_link_info(hdev); 1926 1927 hclgevf_update_link_mode(hdev); 1928 1929 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1930 1931 return 0; 1932 } 1933 1934 static void hclgevf_ae_stop(struct hnae3_handle *handle) 1935 { 1936 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1937 int i; 1938 1939 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1940 1941 for (i = 0; i < handle->kinfo.num_tqps; i++) 1942 hclgevf_reset_tqp(handle, i); 1943 1944 /* reset tqp stats */ 1945 hclgevf_reset_tqp_stats(handle); 1946 hclgevf_update_link_status(hdev, 0); 1947 } 1948 1949 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 1950 { 1951 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1952 u8 msg_data; 1953 1954 msg_data = alive ? 1 : 0; 1955 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE, 1956 0, &msg_data, 1, false, NULL, 0); 1957 } 1958 1959 static int hclgevf_client_start(struct hnae3_handle *handle) 1960 { 1961 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1962 1963 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 1964 return hclgevf_set_alive(handle, true); 1965 } 1966 1967 static void hclgevf_client_stop(struct hnae3_handle *handle) 1968 { 1969 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1970 int ret; 1971 1972 ret = hclgevf_set_alive(handle, false); 1973 if (ret) 1974 dev_warn(&hdev->pdev->dev, 1975 "%s failed %d\n", __func__, ret); 1976 1977 del_timer_sync(&hdev->keep_alive_timer); 1978 cancel_work_sync(&hdev->keep_alive_task); 1979 } 1980 1981 static void hclgevf_state_init(struct hclgevf_dev *hdev) 1982 { 1983 /* setup tasks for the MBX */ 1984 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); 1985 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1986 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1987 1988 /* setup tasks for service timer */ 1989 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); 1990 1991 INIT_WORK(&hdev->service_task, hclgevf_service_task); 1992 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1993 1994 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); 1995 1996 mutex_init(&hdev->mbx_resp.mbx_mutex); 1997 1998 /* bring the device down */ 1999 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2000 } 2001 2002 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2003 { 2004 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2005 2006 if (hdev->service_timer.function) 2007 del_timer_sync(&hdev->service_timer); 2008 if (hdev->service_task.func) 2009 cancel_work_sync(&hdev->service_task); 2010 if (hdev->mbx_service_task.func) 2011 cancel_work_sync(&hdev->mbx_service_task); 2012 if (hdev->rst_service_task.func) 2013 cancel_work_sync(&hdev->rst_service_task); 2014 2015 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2016 } 2017 2018 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2019 { 2020 struct pci_dev *pdev = hdev->pdev; 2021 int vectors; 2022 int i; 2023 2024 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) 2025 vectors = pci_alloc_irq_vectors(pdev, 2026 hdev->roce_base_msix_offset + 1, 2027 hdev->num_msi, 2028 PCI_IRQ_MSIX); 2029 else 2030 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 2031 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2032 2033 if (vectors < 0) { 2034 dev_err(&pdev->dev, 2035 "failed(%d) to allocate MSI/MSI-X vectors\n", 2036 vectors); 2037 return vectors; 2038 } 2039 if (vectors < hdev->num_msi) 2040 dev_warn(&hdev->pdev->dev, 2041 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2042 hdev->num_msi, vectors); 2043 2044 hdev->num_msi = vectors; 2045 hdev->num_msi_left = vectors; 2046 hdev->base_msi_vector = pdev->irq; 2047 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2048 2049 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2050 sizeof(u16), GFP_KERNEL); 2051 if (!hdev->vector_status) { 2052 pci_free_irq_vectors(pdev); 2053 return -ENOMEM; 2054 } 2055 2056 for (i = 0; i < hdev->num_msi; i++) 2057 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2058 2059 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2060 sizeof(int), GFP_KERNEL); 2061 if (!hdev->vector_irq) { 2062 devm_kfree(&pdev->dev, hdev->vector_status); 2063 pci_free_irq_vectors(pdev); 2064 return -ENOMEM; 2065 } 2066 2067 return 0; 2068 } 2069 2070 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2071 { 2072 struct pci_dev *pdev = hdev->pdev; 2073 2074 devm_kfree(&pdev->dev, hdev->vector_status); 2075 devm_kfree(&pdev->dev, hdev->vector_irq); 2076 pci_free_irq_vectors(pdev); 2077 } 2078 2079 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2080 { 2081 int ret = 0; 2082 2083 hclgevf_get_misc_vector(hdev); 2084 2085 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2086 0, "hclgevf_cmd", hdev); 2087 if (ret) { 2088 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2089 hdev->misc_vector.vector_irq); 2090 return ret; 2091 } 2092 2093 hclgevf_clear_event_cause(hdev, 0); 2094 2095 /* enable misc. vector(vector 0) */ 2096 hclgevf_enable_vector(&hdev->misc_vector, true); 2097 2098 return ret; 2099 } 2100 2101 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2102 { 2103 /* disable misc vector(vector 0) */ 2104 hclgevf_enable_vector(&hdev->misc_vector, false); 2105 synchronize_irq(hdev->misc_vector.vector_irq); 2106 free_irq(hdev->misc_vector.vector_irq, hdev); 2107 hclgevf_free_vector(hdev, 0); 2108 } 2109 2110 static int hclgevf_init_client_instance(struct hnae3_client *client, 2111 struct hnae3_ae_dev *ae_dev) 2112 { 2113 struct hclgevf_dev *hdev = ae_dev->priv; 2114 int ret; 2115 2116 switch (client->type) { 2117 case HNAE3_CLIENT_KNIC: 2118 hdev->nic_client = client; 2119 hdev->nic.client = client; 2120 2121 ret = client->ops->init_instance(&hdev->nic); 2122 if (ret) 2123 goto clear_nic; 2124 2125 hnae3_set_client_init_flag(client, ae_dev, 1); 2126 2127 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { 2128 struct hnae3_client *rc = hdev->roce_client; 2129 2130 ret = hclgevf_init_roce_base_info(hdev); 2131 if (ret) 2132 goto clear_roce; 2133 ret = rc->ops->init_instance(&hdev->roce); 2134 if (ret) 2135 goto clear_roce; 2136 2137 hnae3_set_client_init_flag(hdev->roce_client, ae_dev, 2138 1); 2139 } 2140 break; 2141 case HNAE3_CLIENT_UNIC: 2142 hdev->nic_client = client; 2143 hdev->nic.client = client; 2144 2145 ret = client->ops->init_instance(&hdev->nic); 2146 if (ret) 2147 goto clear_nic; 2148 2149 hnae3_set_client_init_flag(client, ae_dev, 1); 2150 break; 2151 case HNAE3_CLIENT_ROCE: 2152 if (hnae3_dev_roce_supported(hdev)) { 2153 hdev->roce_client = client; 2154 hdev->roce.client = client; 2155 } 2156 2157 if (hdev->roce_client && hdev->nic_client) { 2158 ret = hclgevf_init_roce_base_info(hdev); 2159 if (ret) 2160 goto clear_roce; 2161 2162 ret = client->ops->init_instance(&hdev->roce); 2163 if (ret) 2164 goto clear_roce; 2165 } 2166 2167 hnae3_set_client_init_flag(client, ae_dev, 1); 2168 break; 2169 default: 2170 return -EINVAL; 2171 } 2172 2173 return 0; 2174 2175 clear_nic: 2176 hdev->nic_client = NULL; 2177 hdev->nic.client = NULL; 2178 return ret; 2179 clear_roce: 2180 hdev->roce_client = NULL; 2181 hdev->roce.client = NULL; 2182 return ret; 2183 } 2184 2185 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2186 struct hnae3_ae_dev *ae_dev) 2187 { 2188 struct hclgevf_dev *hdev = ae_dev->priv; 2189 2190 /* un-init roce, if it exists */ 2191 if (hdev->roce_client) { 2192 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2193 hdev->roce_client = NULL; 2194 hdev->roce.client = NULL; 2195 } 2196 2197 /* un-init nic/unic, if this was not called by roce client */ 2198 if (client->ops->uninit_instance && hdev->nic_client && 2199 client->type != HNAE3_CLIENT_ROCE) { 2200 client->ops->uninit_instance(&hdev->nic, 0); 2201 hdev->nic_client = NULL; 2202 hdev->nic.client = NULL; 2203 } 2204 } 2205 2206 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2207 { 2208 struct pci_dev *pdev = hdev->pdev; 2209 struct hclgevf_hw *hw; 2210 int ret; 2211 2212 ret = pci_enable_device(pdev); 2213 if (ret) { 2214 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2215 return ret; 2216 } 2217 2218 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2219 if (ret) { 2220 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2221 goto err_disable_device; 2222 } 2223 2224 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2225 if (ret) { 2226 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2227 goto err_disable_device; 2228 } 2229 2230 pci_set_master(pdev); 2231 hw = &hdev->hw; 2232 hw->hdev = hdev; 2233 hw->io_base = pci_iomap(pdev, 2, 0); 2234 if (!hw->io_base) { 2235 dev_err(&pdev->dev, "can't map configuration register space\n"); 2236 ret = -ENOMEM; 2237 goto err_clr_master; 2238 } 2239 2240 return 0; 2241 2242 err_clr_master: 2243 pci_clear_master(pdev); 2244 pci_release_regions(pdev); 2245 err_disable_device: 2246 pci_disable_device(pdev); 2247 2248 return ret; 2249 } 2250 2251 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2252 { 2253 struct pci_dev *pdev = hdev->pdev; 2254 2255 pci_iounmap(pdev, hdev->hw.io_base); 2256 pci_clear_master(pdev); 2257 pci_release_regions(pdev); 2258 pci_disable_device(pdev); 2259 } 2260 2261 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2262 { 2263 struct hclgevf_query_res_cmd *req; 2264 struct hclgevf_desc desc; 2265 int ret; 2266 2267 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 2268 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2269 if (ret) { 2270 dev_err(&hdev->pdev->dev, 2271 "query vf resource failed, ret = %d.\n", ret); 2272 return ret; 2273 } 2274 2275 req = (struct hclgevf_query_res_cmd *)desc.data; 2276 2277 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { 2278 hdev->roce_base_msix_offset = 2279 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), 2280 HCLGEVF_MSIX_OFT_ROCEE_M, 2281 HCLGEVF_MSIX_OFT_ROCEE_S); 2282 hdev->num_roce_msix = 2283 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2284 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2285 2286 /* VF should have NIC vectors and Roce vectors, NIC vectors 2287 * are queued before Roce vectors. The offset is fixed to 64. 2288 */ 2289 hdev->num_msi = hdev->num_roce_msix + 2290 hdev->roce_base_msix_offset; 2291 } else { 2292 hdev->num_msi = 2293 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2294 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2295 } 2296 2297 return 0; 2298 } 2299 2300 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2301 { 2302 struct pci_dev *pdev = hdev->pdev; 2303 int ret = 0; 2304 2305 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2306 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2307 hclgevf_misc_irq_uninit(hdev); 2308 hclgevf_uninit_msi(hdev); 2309 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2310 } 2311 2312 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2313 pci_set_master(pdev); 2314 ret = hclgevf_init_msi(hdev); 2315 if (ret) { 2316 dev_err(&pdev->dev, 2317 "failed(%d) to init MSI/MSI-X\n", ret); 2318 return ret; 2319 } 2320 2321 ret = hclgevf_misc_irq_init(hdev); 2322 if (ret) { 2323 hclgevf_uninit_msi(hdev); 2324 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2325 ret); 2326 return ret; 2327 } 2328 2329 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2330 } 2331 2332 return ret; 2333 } 2334 2335 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2336 { 2337 struct pci_dev *pdev = hdev->pdev; 2338 int ret; 2339 2340 ret = hclgevf_pci_reset(hdev); 2341 if (ret) { 2342 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2343 return ret; 2344 } 2345 2346 ret = hclgevf_cmd_init(hdev); 2347 if (ret) { 2348 dev_err(&pdev->dev, "cmd failed %d\n", ret); 2349 return ret; 2350 } 2351 2352 ret = hclgevf_rss_init_hw(hdev); 2353 if (ret) { 2354 dev_err(&hdev->pdev->dev, 2355 "failed(%d) to initialize RSS\n", ret); 2356 return ret; 2357 } 2358 2359 ret = hclgevf_config_gro(hdev, true); 2360 if (ret) 2361 return ret; 2362 2363 ret = hclgevf_init_vlan_config(hdev); 2364 if (ret) { 2365 dev_err(&hdev->pdev->dev, 2366 "failed(%d) to initialize VLAN config\n", ret); 2367 return ret; 2368 } 2369 2370 dev_info(&hdev->pdev->dev, "Reset done\n"); 2371 2372 return 0; 2373 } 2374 2375 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 2376 { 2377 struct pci_dev *pdev = hdev->pdev; 2378 int ret; 2379 2380 ret = hclgevf_pci_init(hdev); 2381 if (ret) { 2382 dev_err(&pdev->dev, "PCI initialization failed\n"); 2383 return ret; 2384 } 2385 2386 ret = hclgevf_cmd_queue_init(hdev); 2387 if (ret) { 2388 dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret); 2389 goto err_cmd_queue_init; 2390 } 2391 2392 ret = hclgevf_cmd_init(hdev); 2393 if (ret) 2394 goto err_cmd_init; 2395 2396 /* Get vf resource */ 2397 ret = hclgevf_query_vf_resource(hdev); 2398 if (ret) { 2399 dev_err(&hdev->pdev->dev, 2400 "Query vf status error, ret = %d.\n", ret); 2401 goto err_cmd_init; 2402 } 2403 2404 ret = hclgevf_init_msi(hdev); 2405 if (ret) { 2406 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 2407 goto err_cmd_init; 2408 } 2409 2410 hclgevf_state_init(hdev); 2411 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2412 2413 ret = hclgevf_misc_irq_init(hdev); 2414 if (ret) { 2415 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2416 ret); 2417 goto err_misc_irq_init; 2418 } 2419 2420 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2421 2422 ret = hclgevf_configure(hdev); 2423 if (ret) { 2424 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2425 goto err_config; 2426 } 2427 2428 ret = hclgevf_alloc_tqps(hdev); 2429 if (ret) { 2430 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2431 goto err_config; 2432 } 2433 2434 ret = hclgevf_set_handle_info(hdev); 2435 if (ret) { 2436 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); 2437 goto err_config; 2438 } 2439 2440 ret = hclgevf_config_gro(hdev, true); 2441 if (ret) 2442 goto err_config; 2443 2444 /* vf is not allowed to enable unicast/multicast promisc mode. 2445 * For revision 0x20, default to disable broadcast promisc mode, 2446 * firmware makes sure broadcast packets can be accepted. 2447 * For revision 0x21, default to enable broadcast promisc mode. 2448 */ 2449 ret = hclgevf_set_promisc_mode(hdev, true); 2450 if (ret) 2451 goto err_config; 2452 2453 /* Initialize RSS for this VF */ 2454 ret = hclgevf_rss_init_hw(hdev); 2455 if (ret) { 2456 dev_err(&hdev->pdev->dev, 2457 "failed(%d) to initialize RSS\n", ret); 2458 goto err_config; 2459 } 2460 2461 ret = hclgevf_init_vlan_config(hdev); 2462 if (ret) { 2463 dev_err(&hdev->pdev->dev, 2464 "failed(%d) to initialize VLAN config\n", ret); 2465 goto err_config; 2466 } 2467 2468 hdev->last_reset_time = jiffies; 2469 pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); 2470 2471 return 0; 2472 2473 err_config: 2474 hclgevf_misc_irq_uninit(hdev); 2475 err_misc_irq_init: 2476 hclgevf_state_uninit(hdev); 2477 hclgevf_uninit_msi(hdev); 2478 err_cmd_init: 2479 hclgevf_cmd_uninit(hdev); 2480 err_cmd_queue_init: 2481 hclgevf_pci_uninit(hdev); 2482 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2483 return ret; 2484 } 2485 2486 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 2487 { 2488 hclgevf_state_uninit(hdev); 2489 2490 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2491 hclgevf_misc_irq_uninit(hdev); 2492 hclgevf_uninit_msi(hdev); 2493 } 2494 2495 hclgevf_pci_uninit(hdev); 2496 hclgevf_cmd_uninit(hdev); 2497 } 2498 2499 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 2500 { 2501 struct pci_dev *pdev = ae_dev->pdev; 2502 struct hclgevf_dev *hdev; 2503 int ret; 2504 2505 ret = hclgevf_alloc_hdev(ae_dev); 2506 if (ret) { 2507 dev_err(&pdev->dev, "hclge device allocation failed\n"); 2508 return ret; 2509 } 2510 2511 ret = hclgevf_init_hdev(ae_dev->priv); 2512 if (ret) { 2513 dev_err(&pdev->dev, "hclge device initialization failed\n"); 2514 return ret; 2515 } 2516 2517 hdev = ae_dev->priv; 2518 timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0); 2519 INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task); 2520 2521 return 0; 2522 } 2523 2524 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 2525 { 2526 struct hclgevf_dev *hdev = ae_dev->priv; 2527 2528 hclgevf_uninit_hdev(hdev); 2529 ae_dev->priv = NULL; 2530 } 2531 2532 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 2533 { 2534 struct hnae3_handle *nic = &hdev->nic; 2535 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 2536 2537 return min_t(u32, hdev->rss_size_max, 2538 hdev->num_tqps / kinfo->num_tc); 2539 } 2540 2541 /** 2542 * hclgevf_get_channels - Get the current channels enabled and max supported. 2543 * @handle: hardware information for network interface 2544 * @ch: ethtool channels structure 2545 * 2546 * We don't support separate tx and rx queues as channels. The other count 2547 * represents how many queues are being used for control. max_combined counts 2548 * how many queue pairs we can support. They may not be mapped 1 to 1 with 2549 * q_vectors since we support a lot more queue pairs than q_vectors. 2550 **/ 2551 static void hclgevf_get_channels(struct hnae3_handle *handle, 2552 struct ethtool_channels *ch) 2553 { 2554 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2555 2556 ch->max_combined = hclgevf_get_max_channels(hdev); 2557 ch->other_count = 0; 2558 ch->max_other = 0; 2559 ch->combined_count = handle->kinfo.rss_size; 2560 } 2561 2562 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 2563 u16 *alloc_tqps, u16 *max_rss_size) 2564 { 2565 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2566 2567 *alloc_tqps = hdev->num_tqps; 2568 *max_rss_size = hdev->rss_size_max; 2569 } 2570 2571 static int hclgevf_get_status(struct hnae3_handle *handle) 2572 { 2573 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2574 2575 return hdev->hw.mac.link; 2576 } 2577 2578 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 2579 u8 *auto_neg, u32 *speed, 2580 u8 *duplex) 2581 { 2582 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2583 2584 if (speed) 2585 *speed = hdev->hw.mac.speed; 2586 if (duplex) 2587 *duplex = hdev->hw.mac.duplex; 2588 if (auto_neg) 2589 *auto_neg = AUTONEG_DISABLE; 2590 } 2591 2592 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 2593 u8 duplex) 2594 { 2595 hdev->hw.mac.speed = speed; 2596 hdev->hw.mac.duplex = duplex; 2597 } 2598 2599 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 2600 { 2601 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2602 2603 return hclgevf_config_gro(hdev, enable); 2604 } 2605 2606 static void hclgevf_get_media_type(struct hnae3_handle *handle, 2607 u8 *media_type) 2608 { 2609 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2610 if (media_type) 2611 *media_type = hdev->hw.mac.media_type; 2612 } 2613 2614 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 2615 { 2616 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2617 2618 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2619 } 2620 2621 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 2622 { 2623 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2624 2625 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2626 } 2627 2628 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 2629 { 2630 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2631 2632 return hdev->reset_count; 2633 } 2634 2635 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 2636 unsigned long *supported, 2637 unsigned long *advertising) 2638 { 2639 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2640 2641 *supported = hdev->hw.mac.supported; 2642 *advertising = hdev->hw.mac.advertising; 2643 } 2644 2645 #define MAX_SEPARATE_NUM 4 2646 #define SEPARATOR_VALUE 0xFFFFFFFF 2647 #define REG_NUM_PER_LINE 4 2648 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 2649 2650 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 2651 { 2652 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 2653 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2654 2655 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 2656 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 2657 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 2658 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 2659 2660 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 2661 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 2662 } 2663 2664 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 2665 void *data) 2666 { 2667 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2668 int i, j, reg_um, separator_num; 2669 u32 *reg = data; 2670 2671 *version = hdev->fw_version; 2672 2673 /* fetching per-VF registers values from VF PCIe register space */ 2674 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 2675 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2676 for (i = 0; i < reg_um; i++) 2677 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 2678 for (i = 0; i < separator_num; i++) 2679 *reg++ = SEPARATOR_VALUE; 2680 2681 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 2682 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2683 for (i = 0; i < reg_um; i++) 2684 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 2685 for (i = 0; i < separator_num; i++) 2686 *reg++ = SEPARATOR_VALUE; 2687 2688 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 2689 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2690 for (j = 0; j < hdev->num_tqps; j++) { 2691 for (i = 0; i < reg_um; i++) 2692 *reg++ = hclgevf_read_dev(&hdev->hw, 2693 ring_reg_addr_list[i] + 2694 0x200 * j); 2695 for (i = 0; i < separator_num; i++) 2696 *reg++ = SEPARATOR_VALUE; 2697 } 2698 2699 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 2700 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2701 for (j = 0; j < hdev->num_msi_used - 1; j++) { 2702 for (i = 0; i < reg_um; i++) 2703 *reg++ = hclgevf_read_dev(&hdev->hw, 2704 tqp_intr_reg_addr_list[i] + 2705 4 * j); 2706 for (i = 0; i < separator_num; i++) 2707 *reg++ = SEPARATOR_VALUE; 2708 } 2709 } 2710 2711 static const struct hnae3_ae_ops hclgevf_ops = { 2712 .init_ae_dev = hclgevf_init_ae_dev, 2713 .uninit_ae_dev = hclgevf_uninit_ae_dev, 2714 .flr_prepare = hclgevf_flr_prepare, 2715 .flr_done = hclgevf_flr_done, 2716 .init_client_instance = hclgevf_init_client_instance, 2717 .uninit_client_instance = hclgevf_uninit_client_instance, 2718 .start = hclgevf_ae_start, 2719 .stop = hclgevf_ae_stop, 2720 .client_start = hclgevf_client_start, 2721 .client_stop = hclgevf_client_stop, 2722 .map_ring_to_vector = hclgevf_map_ring_to_vector, 2723 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 2724 .get_vector = hclgevf_get_vector, 2725 .put_vector = hclgevf_put_vector, 2726 .reset_queue = hclgevf_reset_tqp, 2727 .get_mac_addr = hclgevf_get_mac_addr, 2728 .set_mac_addr = hclgevf_set_mac_addr, 2729 .add_uc_addr = hclgevf_add_uc_addr, 2730 .rm_uc_addr = hclgevf_rm_uc_addr, 2731 .add_mc_addr = hclgevf_add_mc_addr, 2732 .rm_mc_addr = hclgevf_rm_mc_addr, 2733 .get_stats = hclgevf_get_stats, 2734 .update_stats = hclgevf_update_stats, 2735 .get_strings = hclgevf_get_strings, 2736 .get_sset_count = hclgevf_get_sset_count, 2737 .get_rss_key_size = hclgevf_get_rss_key_size, 2738 .get_rss_indir_size = hclgevf_get_rss_indir_size, 2739 .get_rss = hclgevf_get_rss, 2740 .set_rss = hclgevf_set_rss, 2741 .get_rss_tuple = hclgevf_get_rss_tuple, 2742 .set_rss_tuple = hclgevf_set_rss_tuple, 2743 .get_tc_size = hclgevf_get_tc_size, 2744 .get_fw_version = hclgevf_get_fw_version, 2745 .set_vlan_filter = hclgevf_set_vlan_filter, 2746 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 2747 .reset_event = hclgevf_reset_event, 2748 .set_default_reset_request = hclgevf_set_def_reset_request, 2749 .get_channels = hclgevf_get_channels, 2750 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 2751 .get_regs_len = hclgevf_get_regs_len, 2752 .get_regs = hclgevf_get_regs, 2753 .get_status = hclgevf_get_status, 2754 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 2755 .get_media_type = hclgevf_get_media_type, 2756 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 2757 .ae_dev_resetting = hclgevf_ae_dev_resetting, 2758 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 2759 .set_gro_en = hclgevf_gro_en, 2760 .set_mtu = hclgevf_set_mtu, 2761 .get_global_queue_id = hclgevf_get_qid_global, 2762 .set_timer_task = hclgevf_set_timer_task, 2763 .get_link_mode = hclgevf_get_link_mode, 2764 }; 2765 2766 static struct hnae3_ae_algo ae_algovf = { 2767 .ops = &hclgevf_ops, 2768 .pdev_id_table = ae_algovf_pci_tbl, 2769 }; 2770 2771 static int hclgevf_init(void) 2772 { 2773 pr_info("%s is initializing\n", HCLGEVF_NAME); 2774 2775 hnae3_register_ae_algo(&ae_algovf); 2776 2777 return 0; 2778 } 2779 2780 static void hclgevf_exit(void) 2781 { 2782 hnae3_unregister_ae_algo(&ae_algovf); 2783 } 2784 module_init(hclgevf_init); 2785 module_exit(hclgevf_exit); 2786 2787 MODULE_LICENSE("GPL"); 2788 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 2789 MODULE_DESCRIPTION("HCLGEVF Driver"); 2790 MODULE_VERSION(HCLGEVF_MOD_VERSION); 2791