1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 15 static struct hnae3_ae_algo ae_algovf; 16 17 static const struct pci_device_id ae_algovf_pci_tbl[] = { 18 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 19 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 20 /* required last entry */ 21 {0, } 22 }; 23 24 static const u8 hclgevf_hash_key[] = { 25 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 26 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 27 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 28 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 29 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 30 }; 31 32 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 33 34 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 35 HCLGEVF_CMDQ_TX_ADDR_H_REG, 36 HCLGEVF_CMDQ_TX_DEPTH_REG, 37 HCLGEVF_CMDQ_TX_TAIL_REG, 38 HCLGEVF_CMDQ_TX_HEAD_REG, 39 HCLGEVF_CMDQ_RX_ADDR_L_REG, 40 HCLGEVF_CMDQ_RX_ADDR_H_REG, 41 HCLGEVF_CMDQ_RX_DEPTH_REG, 42 HCLGEVF_CMDQ_RX_TAIL_REG, 43 HCLGEVF_CMDQ_RX_HEAD_REG, 44 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 45 HCLGEVF_CMDQ_INTR_STS_REG, 46 HCLGEVF_CMDQ_INTR_EN_REG, 47 HCLGEVF_CMDQ_INTR_GEN_REG}; 48 49 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 50 HCLGEVF_RST_ING, 51 HCLGEVF_GRO_EN_REG}; 52 53 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 54 HCLGEVF_RING_RX_ADDR_H_REG, 55 HCLGEVF_RING_RX_BD_NUM_REG, 56 HCLGEVF_RING_RX_BD_LENGTH_REG, 57 HCLGEVF_RING_RX_MERGE_EN_REG, 58 HCLGEVF_RING_RX_TAIL_REG, 59 HCLGEVF_RING_RX_HEAD_REG, 60 HCLGEVF_RING_RX_FBD_NUM_REG, 61 HCLGEVF_RING_RX_OFFSET_REG, 62 HCLGEVF_RING_RX_FBD_OFFSET_REG, 63 HCLGEVF_RING_RX_STASH_REG, 64 HCLGEVF_RING_RX_BD_ERR_REG, 65 HCLGEVF_RING_TX_ADDR_L_REG, 66 HCLGEVF_RING_TX_ADDR_H_REG, 67 HCLGEVF_RING_TX_BD_NUM_REG, 68 HCLGEVF_RING_TX_PRIORITY_REG, 69 HCLGEVF_RING_TX_TC_REG, 70 HCLGEVF_RING_TX_MERGE_EN_REG, 71 HCLGEVF_RING_TX_TAIL_REG, 72 HCLGEVF_RING_TX_HEAD_REG, 73 HCLGEVF_RING_TX_FBD_NUM_REG, 74 HCLGEVF_RING_TX_OFFSET_REG, 75 HCLGEVF_RING_TX_EBD_NUM_REG, 76 HCLGEVF_RING_TX_EBD_OFFSET_REG, 77 HCLGEVF_RING_TX_BD_ERR_REG, 78 HCLGEVF_RING_EN_REG}; 79 80 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 81 HCLGEVF_TQP_INTR_GL0_REG, 82 HCLGEVF_TQP_INTR_GL1_REG, 83 HCLGEVF_TQP_INTR_GL2_REG, 84 HCLGEVF_TQP_INTR_RL_REG}; 85 86 static inline struct hclgevf_dev *hclgevf_ae_get_hdev( 87 struct hnae3_handle *handle) 88 { 89 if (!handle->client) 90 return container_of(handle, struct hclgevf_dev, nic); 91 else if (handle->client->type == HNAE3_CLIENT_ROCE) 92 return container_of(handle, struct hclgevf_dev, roce); 93 else 94 return container_of(handle, struct hclgevf_dev, nic); 95 } 96 97 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 98 { 99 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 100 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 101 struct hclgevf_desc desc; 102 struct hclgevf_tqp *tqp; 103 int status; 104 int i; 105 106 for (i = 0; i < kinfo->num_tqps; i++) { 107 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 108 hclgevf_cmd_setup_basic_desc(&desc, 109 HCLGEVF_OPC_QUERY_RX_STATUS, 110 true); 111 112 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 113 status = 
hclgevf_cmd_send(&hdev->hw, &desc, 1); 114 if (status) { 115 dev_err(&hdev->pdev->dev, 116 "Query tqp stat fail, status = %d,queue = %d\n", 117 status, i); 118 return status; 119 } 120 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 121 le32_to_cpu(desc.data[1]); 122 123 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 124 true); 125 126 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 127 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 128 if (status) { 129 dev_err(&hdev->pdev->dev, 130 "Query tqp stat fail, status = %d,queue = %d\n", 131 status, i); 132 return status; 133 } 134 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 135 le32_to_cpu(desc.data[1]); 136 } 137 138 return 0; 139 } 140 141 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 142 { 143 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 144 struct hclgevf_tqp *tqp; 145 u64 *buff = data; 146 int i; 147 148 for (i = 0; i < kinfo->num_tqps; i++) { 149 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 150 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 151 } 152 for (i = 0; i < kinfo->num_tqps; i++) { 153 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 154 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 155 } 156 157 return buff; 158 } 159 160 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 161 { 162 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 163 164 return kinfo->num_tqps * 2; 165 } 166 167 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 168 { 169 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 170 u8 *buff = data; 171 int i = 0; 172 173 for (i = 0; i < kinfo->num_tqps; i++) { 174 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 175 struct hclgevf_tqp, q); 176 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 177 tqp->index); 178 buff += ETH_GSTRING_LEN; 179 } 180 181 for (i = 0; i < kinfo->num_tqps; i++) { 182 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 183 struct hclgevf_tqp, q); 184 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 185 tqp->index); 186 buff += ETH_GSTRING_LEN; 187 } 188 189 return buff; 190 } 191 192 static void hclgevf_update_stats(struct hnae3_handle *handle, 193 struct net_device_stats *net_stats) 194 { 195 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 196 int status; 197 198 status = hclgevf_tqps_update_stats(handle); 199 if (status) 200 dev_err(&hdev->pdev->dev, 201 "VF update of TQPS stats fail, status = %d.\n", 202 status); 203 } 204 205 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 206 { 207 if (strset == ETH_SS_TEST) 208 return -EOPNOTSUPP; 209 else if (strset == ETH_SS_STATS) 210 return hclgevf_tqps_get_sset_count(handle, strset); 211 212 return 0; 213 } 214 215 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 216 u8 *data) 217 { 218 u8 *p = (char *)data; 219 220 if (strset == ETH_SS_STATS) 221 p = hclgevf_tqps_get_strings(handle, p); 222 } 223 224 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 225 { 226 hclgevf_tqps_get_stats(handle, data); 227 } 228 229 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 230 { 231 u8 resp_msg; 232 int status; 233 234 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, 235 true, &resp_msg, sizeof(u8)); 236 if (status) { 237 dev_err(&hdev->pdev->dev, 238 "VF request to get TC info from PF failed %d", 239 status); 240 return status; 241 } 242 243 hdev->hw_tc_map = resp_msg; 244 245 return 0; 246 } 247 248 
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 249 { 250 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 251 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 252 int status; 253 254 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, 255 true, resp_msg, 256 HCLGEVF_TQPS_RSS_INFO_LEN); 257 if (status) { 258 dev_err(&hdev->pdev->dev, 259 "VF request to get tqp info from PF failed %d", 260 status); 261 return status; 262 } 263 264 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); 265 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); 266 memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16)); 267 268 return 0; 269 } 270 271 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 272 { 273 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 274 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 275 int ret; 276 277 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0, 278 true, resp_msg, 279 HCLGEVF_TQPS_DEPTH_INFO_LEN); 280 if (ret) { 281 dev_err(&hdev->pdev->dev, 282 "VF request to get tqp depth info from PF failed %d", 283 ret); 284 return ret; 285 } 286 287 memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16)); 288 memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16)); 289 290 return 0; 291 } 292 293 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 294 { 295 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 296 u8 msg_data[2], resp_data[2]; 297 u16 qid_in_pf = 0; 298 int ret; 299 300 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 301 302 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, 303 2, true, resp_data, 2); 304 if (!ret) 305 qid_in_pf = *(u16 *)resp_data; 306 307 return qid_in_pf; 308 } 309 310 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 311 { 312 struct hclgevf_tqp *tqp; 313 int i; 314 315 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 316 sizeof(struct hclgevf_tqp), GFP_KERNEL); 317 if (!hdev->htqp) 318 return -ENOMEM; 319 320 tqp = hdev->htqp; 321 322 for (i = 0; i < hdev->num_tqps; i++) { 323 tqp->dev = &hdev->pdev->dev; 324 tqp->index = i; 325 326 tqp->q.ae_algo = &ae_algovf; 327 tqp->q.buf_size = hdev->rx_buf_len; 328 tqp->q.tx_desc_num = hdev->num_tx_desc; 329 tqp->q.rx_desc_num = hdev->num_rx_desc; 330 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + 331 i * HCLGEVF_TQP_REG_SIZE; 332 333 tqp++; 334 } 335 336 return 0; 337 } 338 339 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 340 { 341 struct hnae3_handle *nic = &hdev->nic; 342 struct hnae3_knic_private_info *kinfo; 343 u16 new_tqps = hdev->num_tqps; 344 int i; 345 346 kinfo = &nic->kinfo; 347 kinfo->num_tc = 0; 348 kinfo->num_tx_desc = hdev->num_tx_desc; 349 kinfo->num_rx_desc = hdev->num_rx_desc; 350 kinfo->rx_buf_len = hdev->rx_buf_len; 351 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 352 if (hdev->hw_tc_map & BIT(i)) 353 kinfo->num_tc++; 354 355 kinfo->rss_size 356 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 357 new_tqps = kinfo->rss_size * kinfo->num_tc; 358 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 359 360 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 361 sizeof(struct hnae3_queue *), GFP_KERNEL); 362 if (!kinfo->tqp) 363 return -ENOMEM; 364 365 for (i = 0; i < kinfo->num_tqps; i++) { 366 hdev->htqp[i].q.handle = &hdev->nic; 367 hdev->htqp[i].q.tqp_index = i; 368 kinfo->tqp[i] = &hdev->htqp[i].q; 369 } 370 371 return 0; 372 } 373 374 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 375 { 376 int status; 377 u8 resp_msg; 378 379 status = 
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 380 0, false, &resp_msg, sizeof(u8)); 381 if (status) 382 dev_err(&hdev->pdev->dev, 383 "VF failed to fetch link status(%d) from PF", status); 384 } 385 386 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 387 { 388 struct hnae3_handle *rhandle = &hdev->roce; 389 struct hnae3_handle *handle = &hdev->nic; 390 struct hnae3_client *rclient; 391 struct hnae3_client *client; 392 393 client = handle->client; 394 rclient = hdev->roce_client; 395 396 link_state = 397 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 398 399 if (link_state != hdev->hw.mac.link) { 400 client->ops->link_status_change(handle, !!link_state); 401 if (rclient && rclient->ops->link_status_change) 402 rclient->ops->link_status_change(rhandle, !!link_state); 403 hdev->hw.mac.link = link_state; 404 } 405 } 406 407 void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 408 { 409 #define HCLGEVF_ADVERTISING 0 410 #define HCLGEVF_SUPPORTED 1 411 u8 send_msg; 412 u8 resp_msg; 413 414 send_msg = HCLGEVF_ADVERTISING; 415 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, 416 sizeof(u8), false, &resp_msg, sizeof(u8)); 417 send_msg = HCLGEVF_SUPPORTED; 418 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, 419 sizeof(u8), false, &resp_msg, sizeof(u8)); 420 } 421 422 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 423 { 424 struct hnae3_handle *nic = &hdev->nic; 425 int ret; 426 427 nic->ae_algo = &ae_algovf; 428 nic->pdev = hdev->pdev; 429 nic->numa_node_mask = hdev->numa_node_mask; 430 nic->flags |= HNAE3_SUPPORT_VF; 431 432 if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { 433 dev_err(&hdev->pdev->dev, "unsupported device type %d\n", 434 hdev->ae_dev->dev_type); 435 return -EINVAL; 436 } 437 438 ret = hclgevf_knic_setup(hdev); 439 if (ret) 440 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 441 ret); 442 return ret; 443 } 444 445 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 446 { 447 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 448 dev_warn(&hdev->pdev->dev, 449 "vector(vector_id %d) has been freed.\n", vector_id); 450 return; 451 } 452 453 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 454 hdev->num_msi_left += 1; 455 hdev->num_msi_used -= 1; 456 } 457 458 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 459 struct hnae3_vector_info *vector_info) 460 { 461 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 462 struct hnae3_vector_info *vector = vector_info; 463 int alloc = 0; 464 int i, j; 465 466 vector_num = min(hdev->num_msi_left, vector_num); 467 468 for (j = 0; j < vector_num; j++) { 469 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 470 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 471 vector->vector = pci_irq_vector(hdev->pdev, i); 472 vector->io_addr = hdev->hw.io_base + 473 HCLGEVF_VECTOR_REG_BASE + 474 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 475 hdev->vector_status[i] = 0; 476 hdev->vector_irq[i] = vector->vector; 477 478 vector++; 479 alloc++; 480 481 break; 482 } 483 } 484 } 485 hdev->num_msi_left -= alloc; 486 hdev->num_msi_used += alloc; 487 488 return alloc; 489 } 490 491 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 492 { 493 int i; 494 495 for (i = 0; i < hdev->num_msi; i++) 496 if (vector == hdev->vector_irq[i]) 497 return i; 498 499 return -EINVAL; 500 } 501 502 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 503 
const u8 hfunc, const u8 *key) 504 { 505 struct hclgevf_rss_config_cmd *req; 506 struct hclgevf_desc desc; 507 int key_offset; 508 int key_size; 509 int ret; 510 511 req = (struct hclgevf_rss_config_cmd *)desc.data; 512 513 for (key_offset = 0; key_offset < 3; key_offset++) { 514 hclgevf_cmd_setup_basic_desc(&desc, 515 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 516 false); 517 518 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 519 req->hash_config |= 520 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 521 522 if (key_offset == 2) 523 key_size = 524 HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; 525 else 526 key_size = HCLGEVF_RSS_HASH_KEY_NUM; 527 528 memcpy(req->hash_key, 529 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 530 531 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 532 if (ret) { 533 dev_err(&hdev->pdev->dev, 534 "Configure RSS config fail, status = %d\n", 535 ret); 536 return ret; 537 } 538 } 539 540 return 0; 541 } 542 543 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 544 { 545 return HCLGEVF_RSS_KEY_SIZE; 546 } 547 548 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) 549 { 550 return HCLGEVF_RSS_IND_TBL_SIZE; 551 } 552 553 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 554 { 555 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 556 struct hclgevf_rss_indirection_table_cmd *req; 557 struct hclgevf_desc desc; 558 int status; 559 int i, j; 560 561 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 562 563 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { 564 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 565 false); 566 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; 567 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; 568 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 569 req->rss_result[j] = 570 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 571 572 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 573 if (status) { 574 dev_err(&hdev->pdev->dev, 575 "VF failed(=%d) to set RSS indirection table\n", 576 status); 577 return status; 578 } 579 } 580 581 return 0; 582 } 583 584 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 585 { 586 struct hclgevf_rss_tc_mode_cmd *req; 587 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 588 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 589 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 590 struct hclgevf_desc desc; 591 u16 roundup_size; 592 int status; 593 int i; 594 595 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 596 597 roundup_size = roundup_pow_of_two(rss_size); 598 roundup_size = ilog2(roundup_size); 599 600 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 601 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 602 tc_size[i] = roundup_size; 603 tc_offset[i] = rss_size * i; 604 } 605 606 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 607 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 608 hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, 609 (tc_valid[i] & 0x1)); 610 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, 611 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 612 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, 613 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 614 } 615 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 616 if (status) 617 dev_err(&hdev->pdev->dev, 618 "VF failed(=%d) to set rss tc mode\n", status); 619 620 return status; 621 } 622 623 /* for revision 0x20, vf shared the same rss config with pf */ 624 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 625 { 626 #define HCLGEVF_RSS_MBX_RESP_LEN 8 627 628 
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 629 u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 630 u16 msg_num, hash_key_index; 631 u8 index; 632 int ret; 633 634 msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 635 HCLGEVF_RSS_MBX_RESP_LEN; 636 for (index = 0; index < msg_num; index++) { 637 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0, 638 &index, sizeof(index), 639 true, resp_msg, 640 HCLGEVF_RSS_MBX_RESP_LEN); 641 if (ret) { 642 dev_err(&hdev->pdev->dev, 643 "VF get rss hash key from PF failed, ret=%d", 644 ret); 645 return ret; 646 } 647 648 hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 649 if (index == msg_num - 1) 650 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 651 &resp_msg[0], 652 HCLGEVF_RSS_KEY_SIZE - hash_key_index); 653 else 654 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 655 &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 656 } 657 658 return 0; 659 } 660 661 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 662 u8 *hfunc) 663 { 664 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 665 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 666 int i, ret; 667 668 if (handle->pdev->revision >= 0x21) { 669 /* Get hash algorithm */ 670 if (hfunc) { 671 switch (rss_cfg->hash_algo) { 672 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 673 *hfunc = ETH_RSS_HASH_TOP; 674 break; 675 case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 676 *hfunc = ETH_RSS_HASH_XOR; 677 break; 678 default: 679 *hfunc = ETH_RSS_HASH_UNKNOWN; 680 break; 681 } 682 } 683 684 /* Get the RSS Key required by the user */ 685 if (key) 686 memcpy(key, rss_cfg->rss_hash_key, 687 HCLGEVF_RSS_KEY_SIZE); 688 } else { 689 if (hfunc) 690 *hfunc = ETH_RSS_HASH_TOP; 691 if (key) { 692 ret = hclgevf_get_rss_hash_key(hdev); 693 if (ret) 694 return ret; 695 memcpy(key, rss_cfg->rss_hash_key, 696 HCLGEVF_RSS_KEY_SIZE); 697 } 698 } 699 700 if (indir) 701 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 702 indir[i] = rss_cfg->rss_indirection_tbl[i]; 703 704 return 0; 705 } 706 707 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 708 const u8 *key, const u8 hfunc) 709 { 710 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 711 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 712 int ret, i; 713 714 if (handle->pdev->revision >= 0x21) { 715 /* Set the RSS Hash Key if specififed by the user */ 716 if (key) { 717 switch (hfunc) { 718 case ETH_RSS_HASH_TOP: 719 rss_cfg->hash_algo = 720 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 721 break; 722 case ETH_RSS_HASH_XOR: 723 rss_cfg->hash_algo = 724 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 725 break; 726 case ETH_RSS_HASH_NO_CHANGE: 727 break; 728 default: 729 return -EINVAL; 730 } 731 732 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 733 key); 734 if (ret) 735 return ret; 736 737 /* Update the shadow RSS key with user specified qids */ 738 memcpy(rss_cfg->rss_hash_key, key, 739 HCLGEVF_RSS_KEY_SIZE); 740 } 741 } 742 743 /* update the shadow RSS table with user specified qids */ 744 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 745 rss_cfg->rss_indirection_tbl[i] = indir[i]; 746 747 /* update the hardware */ 748 return hclgevf_set_rss_indir_table(hdev); 749 } 750 751 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 752 { 753 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGEVF_S_PORT_BIT : 0; 754 755 if (nfc->data & RXH_L4_B_2_3) 756 hash_sets |= HCLGEVF_D_PORT_BIT; 757 else 758 hash_sets &= ~HCLGEVF_D_PORT_BIT; 759 760 if (nfc->data & RXH_IP_SRC) 761 hash_sets |= HCLGEVF_S_IP_BIT; 762 else 763 hash_sets &= ~HCLGEVF_S_IP_BIT; 764 765 if (nfc->data & RXH_IP_DST) 766 hash_sets |= HCLGEVF_D_IP_BIT; 767 else 768 hash_sets &= ~HCLGEVF_D_IP_BIT; 769 770 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 771 hash_sets |= HCLGEVF_V_TAG_BIT; 772 773 return hash_sets; 774 } 775 776 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 777 struct ethtool_rxnfc *nfc) 778 { 779 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 780 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 781 struct hclgevf_rss_input_tuple_cmd *req; 782 struct hclgevf_desc desc; 783 u8 tuple_sets; 784 int ret; 785 786 if (handle->pdev->revision == 0x20) 787 return -EOPNOTSUPP; 788 789 if (nfc->data & 790 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 791 return -EINVAL; 792 793 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 794 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 795 796 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 797 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 798 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 799 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 800 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 801 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 802 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 803 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 804 805 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 806 switch (nfc->flow_type) { 807 case TCP_V4_FLOW: 808 req->ipv4_tcp_en = tuple_sets; 809 break; 810 case TCP_V6_FLOW: 811 req->ipv6_tcp_en = tuple_sets; 812 break; 813 case UDP_V4_FLOW: 814 req->ipv4_udp_en = tuple_sets; 815 break; 816 case UDP_V6_FLOW: 817 req->ipv6_udp_en = tuple_sets; 818 break; 819 case SCTP_V4_FLOW: 820 req->ipv4_sctp_en = tuple_sets; 821 break; 822 case SCTP_V6_FLOW: 823 if ((nfc->data & RXH_L4_B_0_1) || 824 (nfc->data & RXH_L4_B_2_3)) 825 return -EINVAL; 826 827 req->ipv6_sctp_en = tuple_sets; 828 break; 829 case IPV4_FLOW: 830 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 831 break; 832 case IPV6_FLOW: 833 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 834 break; 835 default: 836 return -EINVAL; 837 } 838 839 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 840 if (ret) { 841 dev_err(&hdev->pdev->dev, 842 "Set rss tuple fail, status = %d\n", ret); 843 return ret; 844 } 845 846 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 847 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 848 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 849 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 850 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 851 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 852 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 853 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 854 return 0; 855 } 856 857 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 858 struct ethtool_rxnfc *nfc) 859 { 860 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 861 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 862 u8 tuple_sets; 863 864 if (handle->pdev->revision == 0x20) 865 return -EOPNOTSUPP; 866 867 nfc->data = 0; 868 869 switch (nfc->flow_type) { 870 case 
TCP_V4_FLOW: 871 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 872 break; 873 case UDP_V4_FLOW: 874 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; 875 break; 876 case TCP_V6_FLOW: 877 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 878 break; 879 case UDP_V6_FLOW: 880 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; 881 break; 882 case SCTP_V4_FLOW: 883 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 884 break; 885 case SCTP_V6_FLOW: 886 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 887 break; 888 case IPV4_FLOW: 889 case IPV6_FLOW: 890 tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 891 break; 892 default: 893 return -EINVAL; 894 } 895 896 if (!tuple_sets) 897 return 0; 898 899 if (tuple_sets & HCLGEVF_D_PORT_BIT) 900 nfc->data |= RXH_L4_B_2_3; 901 if (tuple_sets & HCLGEVF_S_PORT_BIT) 902 nfc->data |= RXH_L4_B_0_1; 903 if (tuple_sets & HCLGEVF_D_IP_BIT) 904 nfc->data |= RXH_IP_DST; 905 if (tuple_sets & HCLGEVF_S_IP_BIT) 906 nfc->data |= RXH_IP_SRC; 907 908 return 0; 909 } 910 911 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 912 struct hclgevf_rss_cfg *rss_cfg) 913 { 914 struct hclgevf_rss_input_tuple_cmd *req; 915 struct hclgevf_desc desc; 916 int ret; 917 918 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 919 920 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 921 922 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 923 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 924 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 925 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 926 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 927 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 928 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 929 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 930 931 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 932 if (ret) 933 dev_err(&hdev->pdev->dev, 934 "Configure rss input fail, status = %d\n", ret); 935 return ret; 936 } 937 938 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 939 { 940 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 941 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 942 943 return rss_cfg->rss_size; 944 } 945 946 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 947 int vector_id, 948 struct hnae3_ring_chain_node *ring_chain) 949 { 950 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 951 struct hnae3_ring_chain_node *node; 952 struct hclge_mbx_vf_to_pf_cmd *req; 953 struct hclgevf_desc desc; 954 int i = 0; 955 int status; 956 u8 type; 957 958 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 959 960 for (node = ring_chain; node; node = node->next) { 961 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 962 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; 963 964 if (i == 0) { 965 hclgevf_cmd_setup_basic_desc(&desc, 966 HCLGEVF_OPC_MBX_VF_TO_PF, 967 false); 968 type = en ? 
969 HCLGE_MBX_MAP_RING_TO_VECTOR : 970 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 971 req->msg[0] = type; 972 req->msg[1] = vector_id; 973 } 974 975 req->msg[idx_offset] = 976 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 977 req->msg[idx_offset + 1] = node->tqp_index; 978 req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, 979 HNAE3_RING_GL_IDX_M, 980 HNAE3_RING_GL_IDX_S); 981 982 i++; 983 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - 984 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / 985 HCLGE_MBX_RING_NODE_VARIABLE_NUM) || 986 !node->next) { 987 req->msg[2] = i; 988 989 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 990 if (status) { 991 dev_err(&hdev->pdev->dev, 992 "Map TQP fail, status is %d.\n", 993 status); 994 return status; 995 } 996 i = 0; 997 hclgevf_cmd_setup_basic_desc(&desc, 998 HCLGEVF_OPC_MBX_VF_TO_PF, 999 false); 1000 req->msg[0] = type; 1001 req->msg[1] = vector_id; 1002 } 1003 } 1004 1005 return 0; 1006 } 1007 1008 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1009 struct hnae3_ring_chain_node *ring_chain) 1010 { 1011 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1012 int vector_id; 1013 1014 vector_id = hclgevf_get_vector_index(hdev, vector); 1015 if (vector_id < 0) { 1016 dev_err(&handle->pdev->dev, 1017 "Get vector index fail. ret =%d\n", vector_id); 1018 return vector_id; 1019 } 1020 1021 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1022 } 1023 1024 static int hclgevf_unmap_ring_from_vector( 1025 struct hnae3_handle *handle, 1026 int vector, 1027 struct hnae3_ring_chain_node *ring_chain) 1028 { 1029 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1030 int ret, vector_id; 1031 1032 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1033 return 0; 1034 1035 vector_id = hclgevf_get_vector_index(hdev, vector); 1036 if (vector_id < 0) { 1037 dev_err(&handle->pdev->dev, 1038 "Get vector index fail. ret =%d\n", vector_id); 1039 return vector_id; 1040 } 1041 1042 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 1043 if (ret) 1044 dev_err(&handle->pdev->dev, 1045 "Unmap ring from vector fail. vector=%d, ret =%d\n", 1046 vector_id, 1047 ret); 1048 1049 return ret; 1050 } 1051 1052 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 1053 { 1054 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1055 int vector_id; 1056 1057 vector_id = hclgevf_get_vector_index(hdev, vector); 1058 if (vector_id < 0) { 1059 dev_err(&handle->pdev->dev, 1060 "hclgevf_put_vector get vector index fail. ret =%d\n", 1061 vector_id); 1062 return vector_id; 1063 } 1064 1065 hclgevf_free_vector(hdev, vector_id); 1066 1067 return 0; 1068 } 1069 1070 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1071 bool en_bc_pmc) 1072 { 1073 struct hclge_mbx_vf_to_pf_cmd *req; 1074 struct hclgevf_desc desc; 1075 int ret; 1076 1077 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; 1078 1079 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); 1080 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; 1081 req->msg[1] = en_bc_pmc ? 
1 : 0; 1082 1083 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1084 if (ret) 1085 dev_err(&hdev->pdev->dev, 1086 "Set promisc mode fail, status is %d.\n", ret); 1087 1088 return ret; 1089 } 1090 1091 static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc) 1092 { 1093 return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc); 1094 } 1095 1096 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, 1097 int stream_id, bool enable) 1098 { 1099 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1100 struct hclgevf_desc desc; 1101 int status; 1102 1103 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1104 1105 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1106 false); 1107 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1108 req->stream_id = cpu_to_le16(stream_id); 1109 req->enable |= enable << HCLGEVF_TQP_ENABLE_B; 1110 1111 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1112 if (status) 1113 dev_err(&hdev->pdev->dev, 1114 "TQP enable fail, status =%d.\n", status); 1115 1116 return status; 1117 } 1118 1119 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1120 { 1121 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1122 struct hclgevf_tqp *tqp; 1123 int i; 1124 1125 for (i = 0; i < kinfo->num_tqps; i++) { 1126 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1127 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1128 } 1129 } 1130 1131 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1132 { 1133 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1134 1135 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1136 } 1137 1138 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1139 bool is_first) 1140 { 1141 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1142 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1143 u8 *new_mac_addr = (u8 *)p; 1144 u8 msg_data[ETH_ALEN * 2]; 1145 u16 subcode; 1146 int status; 1147 1148 ether_addr_copy(msg_data, new_mac_addr); 1149 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); 1150 1151 subcode = is_first ? 
HCLGE_MBX_MAC_VLAN_UC_ADD : 1152 HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1153 1154 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1155 subcode, msg_data, ETH_ALEN * 2, 1156 true, NULL, 0); 1157 if (!status) 1158 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1159 1160 return status; 1161 } 1162 1163 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1164 const unsigned char *addr) 1165 { 1166 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1167 1168 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1169 HCLGE_MBX_MAC_VLAN_UC_ADD, 1170 addr, ETH_ALEN, false, NULL, 0); 1171 } 1172 1173 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1174 const unsigned char *addr) 1175 { 1176 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1177 1178 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 1179 HCLGE_MBX_MAC_VLAN_UC_REMOVE, 1180 addr, ETH_ALEN, false, NULL, 0); 1181 } 1182 1183 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1184 const unsigned char *addr) 1185 { 1186 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1187 1188 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1189 HCLGE_MBX_MAC_VLAN_MC_ADD, 1190 addr, ETH_ALEN, false, NULL, 0); 1191 } 1192 1193 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1194 const unsigned char *addr) 1195 { 1196 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1197 1198 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, 1199 HCLGE_MBX_MAC_VLAN_MC_REMOVE, 1200 addr, ETH_ALEN, false, NULL, 0); 1201 } 1202 1203 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1204 __be16 proto, u16 vlan_id, 1205 bool is_kill) 1206 { 1207 #define HCLGEVF_VLAN_MBX_MSG_LEN 5 1208 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1209 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; 1210 1211 if (vlan_id > 4095) 1212 return -EINVAL; 1213 1214 if (proto != htons(ETH_P_8021Q)) 1215 return -EPROTONOSUPPORT; 1216 1217 msg_data[0] = is_kill; 1218 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); 1219 memcpy(&msg_data[3], &proto, sizeof(proto)); 1220 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1221 HCLGE_MBX_VLAN_FILTER, msg_data, 1222 HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); 1223 } 1224 1225 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1226 { 1227 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1228 u8 msg_data; 1229 1230 msg_data = enable ? 
1 : 0; 1231 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, 1232 HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, 1233 1, false, NULL, 0); 1234 } 1235 1236 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1237 { 1238 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1239 u8 msg_data[2]; 1240 int ret; 1241 1242 memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); 1243 1244 /* disable vf queue before send queue reset msg to PF */ 1245 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 1246 if (ret) 1247 return ret; 1248 1249 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 1250 2, true, NULL, 0); 1251 } 1252 1253 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1254 { 1255 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1256 1257 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu, 1258 sizeof(new_mtu), true, NULL, 0); 1259 } 1260 1261 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1262 enum hnae3_reset_notify_type type) 1263 { 1264 struct hnae3_client *client = hdev->nic_client; 1265 struct hnae3_handle *handle = &hdev->nic; 1266 int ret; 1267 1268 if (!client->ops->reset_notify) 1269 return -EOPNOTSUPP; 1270 1271 ret = client->ops->reset_notify(handle, type); 1272 if (ret) 1273 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1274 type, ret); 1275 1276 return ret; 1277 } 1278 1279 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 1280 { 1281 struct hclgevf_dev *hdev = ae_dev->priv; 1282 1283 set_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1284 } 1285 1286 static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev, 1287 unsigned long delay_us, 1288 unsigned long wait_cnt) 1289 { 1290 unsigned long cnt = 0; 1291 1292 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && 1293 cnt++ < wait_cnt) 1294 usleep_range(delay_us, delay_us * 2); 1295 1296 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { 1297 dev_err(&hdev->pdev->dev, 1298 "flr wait timeout\n"); 1299 return -ETIMEDOUT; 1300 } 1301 1302 return 0; 1303 } 1304 1305 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1306 { 1307 #define HCLGEVF_RESET_WAIT_US 20000 1308 #define HCLGEVF_RESET_WAIT_CNT 2000 1309 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1310 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1311 1312 u32 val; 1313 int ret; 1314 1315 /* wait to check the hardware reset completion status */ 1316 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1317 dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val); 1318 1319 if (hdev->reset_type == HNAE3_FLR_RESET) 1320 return hclgevf_flr_poll_timeout(hdev, 1321 HCLGEVF_RESET_WAIT_US, 1322 HCLGEVF_RESET_WAIT_CNT); 1323 1324 ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val, 1325 !(val & HCLGEVF_RST_ING_BITS), 1326 HCLGEVF_RESET_WAIT_US, 1327 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1328 1329 /* hardware completion status should be available by this time */ 1330 if (ret) { 1331 dev_err(&hdev->pdev->dev, 1332 "could'nt get reset done status from h/w, timeout!\n"); 1333 return ret; 1334 } 1335 1336 /* we will wait a bit more to let reset of the stack to complete. This 1337 * might happen in case reset assertion was made by PF. Yes, this also 1338 * means we might end up waiting bit more even for VF reset. 
1339 */ 1340 msleep(5000); 1341 1342 return 0; 1343 } 1344 1345 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1346 { 1347 int ret; 1348 1349 /* uninitialize the nic client */ 1350 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1351 if (ret) 1352 return ret; 1353 1354 /* re-initialize the hclge device */ 1355 ret = hclgevf_reset_hdev(hdev); 1356 if (ret) { 1357 dev_err(&hdev->pdev->dev, 1358 "hclge device re-init failed, VF is disabled!\n"); 1359 return ret; 1360 } 1361 1362 /* bring up the nic client again */ 1363 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1364 if (ret) 1365 return ret; 1366 1367 return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); 1368 } 1369 1370 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1371 { 1372 int ret = 0; 1373 1374 switch (hdev->reset_type) { 1375 case HNAE3_VF_FUNC_RESET: 1376 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 1377 0, true, NULL, sizeof(u8)); 1378 break; 1379 case HNAE3_FLR_RESET: 1380 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1381 break; 1382 default: 1383 break; 1384 } 1385 1386 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1387 1388 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", 1389 hdev->reset_type, ret); 1390 1391 return ret; 1392 } 1393 1394 static int hclgevf_reset(struct hclgevf_dev *hdev) 1395 { 1396 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1397 int ret; 1398 1399 /* Initialize ae_dev reset status as well, in case enet layer wants to 1400 * know if device is undergoing reset 1401 */ 1402 ae_dev->reset_type = hdev->reset_type; 1403 hdev->reset_count++; 1404 rtnl_lock(); 1405 1406 /* bring down the nic to stop any ongoing TX/RX */ 1407 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1408 if (ret) 1409 goto err_reset_lock; 1410 1411 rtnl_unlock(); 1412 1413 ret = hclgevf_reset_prepare_wait(hdev); 1414 if (ret) 1415 goto err_reset; 1416 1417 /* check if VF could successfully fetch the hardware reset completion 1418 * status from the hardware 1419 */ 1420 ret = hclgevf_reset_wait(hdev); 1421 if (ret) { 1422 /* can't do much in this situation, will disable VF */ 1423 dev_err(&hdev->pdev->dev, 1424 "VF failed(=%d) to fetch H/W reset completion status\n", 1425 ret); 1426 goto err_reset; 1427 } 1428 1429 rtnl_lock(); 1430 1431 /* now, re-initialize the nic client and ae device*/ 1432 ret = hclgevf_reset_stack(hdev); 1433 if (ret) { 1434 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1435 goto err_reset_lock; 1436 } 1437 1438 /* bring up the nic to enable TX/RX again */ 1439 ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1440 if (ret) 1441 goto err_reset_lock; 1442 1443 rtnl_unlock(); 1444 1445 hdev->last_reset_time = jiffies; 1446 ae_dev->reset_type = HNAE3_NONE_RESET; 1447 1448 return ret; 1449 err_reset_lock: 1450 rtnl_unlock(); 1451 err_reset: 1452 /* When VF reset failed, only the higher level reset asserted by PF 1453 * can restore it, so re-initialize the command queue to receive 1454 * this higher reset event. 
1455 */ 1456 hclgevf_cmd_init(hdev); 1457 dev_err(&hdev->pdev->dev, "failed to reset VF\n"); 1458 1459 return ret; 1460 } 1461 1462 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 1463 unsigned long *addr) 1464 { 1465 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1466 1467 /* return the highest priority reset level amongst all */ 1468 if (test_bit(HNAE3_VF_RESET, addr)) { 1469 rst_level = HNAE3_VF_RESET; 1470 clear_bit(HNAE3_VF_RESET, addr); 1471 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1472 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1473 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1474 rst_level = HNAE3_VF_FULL_RESET; 1475 clear_bit(HNAE3_VF_FULL_RESET, addr); 1476 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1477 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1478 rst_level = HNAE3_VF_PF_FUNC_RESET; 1479 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1480 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1481 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1482 rst_level = HNAE3_VF_FUNC_RESET; 1483 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1484 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 1485 rst_level = HNAE3_FLR_RESET; 1486 clear_bit(HNAE3_FLR_RESET, addr); 1487 } 1488 1489 return rst_level; 1490 } 1491 1492 static void hclgevf_reset_event(struct pci_dev *pdev, 1493 struct hnae3_handle *handle) 1494 { 1495 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1496 struct hclgevf_dev *hdev = ae_dev->priv; 1497 1498 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 1499 1500 if (hdev->default_reset_request) 1501 hdev->reset_level = 1502 hclgevf_get_reset_level(hdev, 1503 &hdev->default_reset_request); 1504 else 1505 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1506 1507 /* reset of this VF requested */ 1508 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1509 hclgevf_reset_task_schedule(hdev); 1510 1511 hdev->last_reset_time = jiffies; 1512 } 1513 1514 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1515 enum hnae3_reset_type rst_type) 1516 { 1517 struct hclgevf_dev *hdev = ae_dev->priv; 1518 1519 set_bit(rst_type, &hdev->default_reset_request); 1520 } 1521 1522 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 1523 { 1524 #define HCLGEVF_FLR_WAIT_MS 100 1525 #define HCLGEVF_FLR_WAIT_CNT 50 1526 struct hclgevf_dev *hdev = ae_dev->priv; 1527 int cnt = 0; 1528 1529 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 1530 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); 1531 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); 1532 hclgevf_reset_event(hdev->pdev, NULL); 1533 1534 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && 1535 cnt++ < HCLGEVF_FLR_WAIT_CNT) 1536 msleep(HCLGEVF_FLR_WAIT_MS); 1537 1538 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) 1539 dev_err(&hdev->pdev->dev, 1540 "flr wait down timeout: %d\n", cnt); 1541 } 1542 1543 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1544 { 1545 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1546 1547 return hdev->fw_version; 1548 } 1549 1550 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1551 { 1552 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1553 1554 vector->vector_irq = pci_irq_vector(hdev->pdev, 1555 HCLGEVF_MISC_VECTOR_NUM); 1556 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1557 /* vector status always valid for Vector 0 */ 1558 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1559 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1560 1561 hdev->num_msi_left -= 1; 1562 
hdev->num_msi_used += 1; 1563 } 1564 1565 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1566 { 1567 if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && 1568 !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { 1569 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1570 schedule_work(&hdev->rst_service_task); 1571 } 1572 } 1573 1574 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1575 { 1576 if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && 1577 !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { 1578 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1579 schedule_work(&hdev->mbx_service_task); 1580 } 1581 } 1582 1583 static void hclgevf_task_schedule(struct hclgevf_dev *hdev) 1584 { 1585 if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && 1586 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) 1587 schedule_work(&hdev->service_task); 1588 } 1589 1590 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev) 1591 { 1592 /* if we have any pending mailbox event then schedule the mbx task */ 1593 if (hdev->mbx_event_pending) 1594 hclgevf_mbx_task_schedule(hdev); 1595 1596 if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) 1597 hclgevf_reset_task_schedule(hdev); 1598 } 1599 1600 static void hclgevf_service_timer(struct timer_list *t) 1601 { 1602 struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); 1603 1604 mod_timer(&hdev->service_timer, jiffies + 5 * HZ); 1605 1606 hclgevf_task_schedule(hdev); 1607 } 1608 1609 static void hclgevf_reset_service_task(struct work_struct *work) 1610 { 1611 struct hclgevf_dev *hdev = 1612 container_of(work, struct hclgevf_dev, rst_service_task); 1613 int ret; 1614 1615 if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1616 return; 1617 1618 clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); 1619 1620 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 1621 &hdev->reset_state)) { 1622 /* PF has initmated that it is about to reset the hardware. 1623 * We now have to poll & check if harware has actually completed 1624 * the reset sequence. On hardware reset completion, VF needs to 1625 * reset the client and ae device. 1626 */ 1627 hdev->reset_attempts = 0; 1628 1629 hdev->last_reset_time = jiffies; 1630 while ((hdev->reset_type = 1631 hclgevf_get_reset_level(hdev, &hdev->reset_pending)) 1632 != HNAE3_NONE_RESET) { 1633 ret = hclgevf_reset(hdev); 1634 if (ret) 1635 dev_err(&hdev->pdev->dev, 1636 "VF stack reset failed %d.\n", ret); 1637 } 1638 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1639 &hdev->reset_state)) { 1640 /* we could be here when either of below happens: 1641 * 1. reset was initiated due to watchdog timeout due to 1642 * a. IMP was earlier reset and our TX got choked down and 1643 * which resulted in watchdog reacting and inducing VF 1644 * reset. This also means our cmdq would be unreliable. 1645 * b. problem in TX due to other lower layer(example link 1646 * layer not functioning properly etc.) 1647 * 2. VF reset might have been initiated due to some config 1648 * change. 1649 * 1650 * NOTE: Theres no clear way to detect above cases than to react 1651 * to the response of PF for this reset request. PF will ack the 1652 * 1b and 2. cases but we will not get any intimation about 1a 1653 * from PF as cmdq would be in unreliable state i.e. mailbox 1654 * communication between PF and VF would be broken. 1655 */ 1656 1657 /* if we are never geting into pending state it means either: 1658 * 1. 
PF is not receiving our request which could be due to IMP 1659 * reset 1660 * 2. PF is screwed 1661 * We cannot do much for 2. but to check first we can try reset 1662 * our PCIe + stack and see if it alleviates the problem. 1663 */ 1664 if (hdev->reset_attempts > 3) { 1665 /* prepare for full reset of stack + pcie interface */ 1666 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 1667 1668 /* "defer" schedule the reset task again */ 1669 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1670 } else { 1671 hdev->reset_attempts++; 1672 1673 set_bit(hdev->reset_level, &hdev->reset_pending); 1674 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1675 } 1676 hclgevf_reset_task_schedule(hdev); 1677 } 1678 1679 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1680 } 1681 1682 static void hclgevf_mailbox_service_task(struct work_struct *work) 1683 { 1684 struct hclgevf_dev *hdev; 1685 1686 hdev = container_of(work, struct hclgevf_dev, mbx_service_task); 1687 1688 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1689 return; 1690 1691 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 1692 1693 hclgevf_mbx_async_handler(hdev); 1694 1695 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1696 } 1697 1698 static void hclgevf_keep_alive_timer(struct timer_list *t) 1699 { 1700 struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer); 1701 1702 schedule_work(&hdev->keep_alive_task); 1703 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 1704 } 1705 1706 static void hclgevf_keep_alive_task(struct work_struct *work) 1707 { 1708 struct hclgevf_dev *hdev; 1709 u8 respmsg; 1710 int ret; 1711 1712 hdev = container_of(work, struct hclgevf_dev, keep_alive_task); 1713 1714 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1715 return; 1716 1717 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, 1718 0, false, &respmsg, sizeof(u8)); 1719 if (ret) 1720 dev_err(&hdev->pdev->dev, 1721 "VF sends keep alive cmd failed(=%d)\n", ret); 1722 } 1723 1724 static void hclgevf_service_task(struct work_struct *work) 1725 { 1726 struct hclgevf_dev *hdev; 1727 1728 hdev = container_of(work, struct hclgevf_dev, service_task); 1729 1730 /* request the link status from the PF. 
PF would be able to tell VF 1731 * about such updates in future so we might remove this later 1732 */ 1733 hclgevf_request_link_info(hdev); 1734 1735 hclgevf_update_link_mode(hdev); 1736 1737 hclgevf_deferred_task_schedule(hdev); 1738 1739 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1740 } 1741 1742 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 1743 { 1744 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 1745 } 1746 1747 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 1748 u32 *clearval) 1749 { 1750 u32 cmdq_src_reg, rst_ing_reg; 1751 1752 /* fetch the events from their corresponding regs */ 1753 cmdq_src_reg = hclgevf_read_dev(&hdev->hw, 1754 HCLGEVF_VECTOR0_CMDQ_SRC_REG); 1755 1756 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) { 1757 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1758 dev_info(&hdev->pdev->dev, 1759 "receive reset interrupt 0x%x!\n", rst_ing_reg); 1760 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 1761 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1762 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1763 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); 1764 *clearval = cmdq_src_reg; 1765 return HCLGEVF_VECTOR0_EVENT_RST; 1766 } 1767 1768 /* check for vector0 mailbox(=CMDQ RX) event source */ 1769 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 1770 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 1771 *clearval = cmdq_src_reg; 1772 return HCLGEVF_VECTOR0_EVENT_MBX; 1773 } 1774 1775 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); 1776 1777 return HCLGEVF_VECTOR0_EVENT_OTHER; 1778 } 1779 1780 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1781 { 1782 writel(en ? 
1 : 0, vector->addr); 1783 } 1784 1785 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 1786 { 1787 enum hclgevf_evt_cause event_cause; 1788 struct hclgevf_dev *hdev = data; 1789 u32 clearval; 1790 1791 hclgevf_enable_vector(&hdev->misc_vector, false); 1792 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 1793 1794 switch (event_cause) { 1795 case HCLGEVF_VECTOR0_EVENT_RST: 1796 hclgevf_reset_task_schedule(hdev); 1797 break; 1798 case HCLGEVF_VECTOR0_EVENT_MBX: 1799 hclgevf_mbx_handler(hdev); 1800 break; 1801 default: 1802 break; 1803 } 1804 1805 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 1806 hclgevf_clear_event_cause(hdev, clearval); 1807 hclgevf_enable_vector(&hdev->misc_vector, true); 1808 } 1809 1810 return IRQ_HANDLED; 1811 } 1812 1813 static int hclgevf_configure(struct hclgevf_dev *hdev) 1814 { 1815 int ret; 1816 1817 /* get queue configuration from PF */ 1818 ret = hclgevf_get_queue_info(hdev); 1819 if (ret) 1820 return ret; 1821 1822 /* get queue depth info from PF */ 1823 ret = hclgevf_get_queue_depth(hdev); 1824 if (ret) 1825 return ret; 1826 1827 /* get tc configuration from PF */ 1828 return hclgevf_get_tc_info(hdev); 1829 } 1830 1831 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 1832 { 1833 struct pci_dev *pdev = ae_dev->pdev; 1834 struct hclgevf_dev *hdev; 1835 1836 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 1837 if (!hdev) 1838 return -ENOMEM; 1839 1840 hdev->pdev = pdev; 1841 hdev->ae_dev = ae_dev; 1842 ae_dev->priv = hdev; 1843 1844 return 0; 1845 } 1846 1847 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 1848 { 1849 struct hnae3_handle *roce = &hdev->roce; 1850 struct hnae3_handle *nic = &hdev->nic; 1851 1852 roce->rinfo.num_vectors = hdev->num_roce_msix; 1853 1854 if (hdev->num_msi_left < roce->rinfo.num_vectors || 1855 hdev->num_msi_left == 0) 1856 return -EINVAL; 1857 1858 roce->rinfo.base_vector = hdev->roce_base_vector; 1859 1860 roce->rinfo.netdev = nic->kinfo.netdev; 1861 roce->rinfo.roce_io_base = hdev->hw.io_base; 1862 1863 roce->pdev = nic->pdev; 1864 roce->ae_algo = nic->ae_algo; 1865 roce->numa_node_mask = nic->numa_node_mask; 1866 1867 return 0; 1868 } 1869 1870 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 1871 { 1872 struct hclgevf_cfg_gro_status_cmd *req; 1873 struct hclgevf_desc desc; 1874 int ret; 1875 1876 if (!hnae3_dev_gro_supported(hdev)) 1877 return 0; 1878 1879 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 1880 false); 1881 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 1882 1883 req->gro_en = cpu_to_le16(en ? 
1 : 0); 1884 1885 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1886 if (ret) 1887 dev_err(&hdev->pdev->dev, 1888 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 1889 1890 return ret; 1891 } 1892 1893 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 1894 { 1895 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1896 int i, ret; 1897 1898 rss_cfg->rss_size = hdev->rss_size_max; 1899 1900 if (hdev->pdev->revision >= 0x21) { 1901 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 1902 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 1903 HCLGEVF_RSS_KEY_SIZE); 1904 1905 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 1906 rss_cfg->rss_hash_key); 1907 if (ret) 1908 return ret; 1909 1910 rss_cfg->rss_tuple_sets.ipv4_tcp_en = 1911 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1912 rss_cfg->rss_tuple_sets.ipv4_udp_en = 1913 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1914 rss_cfg->rss_tuple_sets.ipv4_sctp_en = 1915 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1916 rss_cfg->rss_tuple_sets.ipv4_fragment_en = 1917 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1918 rss_cfg->rss_tuple_sets.ipv6_tcp_en = 1919 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1920 rss_cfg->rss_tuple_sets.ipv6_udp_en = 1921 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1922 rss_cfg->rss_tuple_sets.ipv6_sctp_en = 1923 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 1924 rss_cfg->rss_tuple_sets.ipv6_fragment_en = 1925 HCLGEVF_RSS_INPUT_TUPLE_OTHER; 1926 1927 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 1928 if (ret) 1929 return ret; 1930 1931 } 1932 1933 /* Initialize RSS indirect table for each vport */ 1934 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) 1935 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; 1936 1937 ret = hclgevf_set_rss_indir_table(hdev); 1938 if (ret) 1939 return ret; 1940 1941 return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); 1942 } 1943 1944 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 1945 { 1946 /* other vlan config(like, VLAN TX/RX offload) would also be added 1947 * here later 1948 */ 1949 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 1950 false); 1951 } 1952 1953 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 1954 { 1955 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1956 1957 if (enable) { 1958 mod_timer(&hdev->service_timer, jiffies + HZ); 1959 } else { 1960 del_timer_sync(&hdev->service_timer); 1961 cancel_work_sync(&hdev->service_task); 1962 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 1963 } 1964 } 1965 1966 static int hclgevf_ae_start(struct hnae3_handle *handle) 1967 { 1968 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1969 1970 /* reset tqp stats */ 1971 hclgevf_reset_tqp_stats(handle); 1972 1973 hclgevf_request_link_info(hdev); 1974 1975 hclgevf_update_link_mode(hdev); 1976 1977 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1978 1979 return 0; 1980 } 1981 1982 static void hclgevf_ae_stop(struct hnae3_handle *handle) 1983 { 1984 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1985 int i; 1986 1987 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 1988 1989 for (i = 0; i < handle->kinfo.num_tqps; i++) 1990 hclgevf_reset_tqp(handle, i); 1991 1992 /* reset tqp stats */ 1993 hclgevf_reset_tqp_stats(handle); 1994 hclgevf_update_link_status(hdev, 0); 1995 } 1996 1997 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 1998 { 1999 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2000 u8 msg_data; 2001 2002 msg_data = alive ? 
1 : 0; 2003 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE, 2004 0, &msg_data, 1, false, NULL, 0); 2005 } 2006 2007 static int hclgevf_client_start(struct hnae3_handle *handle) 2008 { 2009 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2010 2011 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); 2012 return hclgevf_set_alive(handle, true); 2013 } 2014 2015 static void hclgevf_client_stop(struct hnae3_handle *handle) 2016 { 2017 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2018 int ret; 2019 2020 ret = hclgevf_set_alive(handle, false); 2021 if (ret) 2022 dev_warn(&hdev->pdev->dev, 2023 "%s failed %d\n", __func__, ret); 2024 2025 del_timer_sync(&hdev->keep_alive_timer); 2026 cancel_work_sync(&hdev->keep_alive_task); 2027 } 2028 2029 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2030 { 2031 /* setup tasks for the MBX */ 2032 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); 2033 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2034 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2035 2036 /* setup tasks for service timer */ 2037 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); 2038 2039 INIT_WORK(&hdev->service_task, hclgevf_service_task); 2040 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); 2041 2042 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); 2043 2044 mutex_init(&hdev->mbx_resp.mbx_mutex); 2045 2046 /* bring the device down */ 2047 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2048 } 2049 2050 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2051 { 2052 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2053 2054 if (hdev->service_timer.function) 2055 del_timer_sync(&hdev->service_timer); 2056 if (hdev->service_task.func) 2057 cancel_work_sync(&hdev->service_task); 2058 if (hdev->mbx_service_task.func) 2059 cancel_work_sync(&hdev->mbx_service_task); 2060 if (hdev->rst_service_task.func) 2061 cancel_work_sync(&hdev->rst_service_task); 2062 2063 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2064 } 2065 2066 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2067 { 2068 struct pci_dev *pdev = hdev->pdev; 2069 int vectors; 2070 int i; 2071 2072 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) 2073 vectors = pci_alloc_irq_vectors(pdev, 2074 hdev->roce_base_msix_offset + 1, 2075 hdev->num_msi, 2076 PCI_IRQ_MSIX); 2077 else 2078 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 2079 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2080 2081 if (vectors < 0) { 2082 dev_err(&pdev->dev, 2083 "failed(%d) to allocate MSI/MSI-X vectors\n", 2084 vectors); 2085 return vectors; 2086 } 2087 if (vectors < hdev->num_msi) 2088 dev_warn(&hdev->pdev->dev, 2089 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2090 hdev->num_msi, vectors); 2091 2092 hdev->num_msi = vectors; 2093 hdev->num_msi_left = vectors; 2094 hdev->base_msi_vector = pdev->irq; 2095 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2096 2097 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2098 sizeof(u16), GFP_KERNEL); 2099 if (!hdev->vector_status) { 2100 pci_free_irq_vectors(pdev); 2101 return -ENOMEM; 2102 } 2103 2104 for (i = 0; i < hdev->num_msi; i++) 2105 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2106 2107 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2108 sizeof(int), GFP_KERNEL); 2109 if (!hdev->vector_irq) { 2110 devm_kfree(&pdev->dev, hdev->vector_status); 2111 pci_free_irq_vectors(pdev); 2112 return -ENOMEM; 2113 } 2114 2115 return 0; 2116 } 2117 2118 static void 
hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2119 { 2120 struct pci_dev *pdev = hdev->pdev; 2121 2122 devm_kfree(&pdev->dev, hdev->vector_status); 2123 devm_kfree(&pdev->dev, hdev->vector_irq); 2124 pci_free_irq_vectors(pdev); 2125 } 2126 2127 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2128 { 2129 int ret = 0; 2130 2131 hclgevf_get_misc_vector(hdev); 2132 2133 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2134 0, "hclgevf_cmd", hdev); 2135 if (ret) { 2136 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2137 hdev->misc_vector.vector_irq); 2138 return ret; 2139 } 2140 2141 hclgevf_clear_event_cause(hdev, 0); 2142 2143 /* enable misc. vector(vector 0) */ 2144 hclgevf_enable_vector(&hdev->misc_vector, true); 2145 2146 return ret; 2147 } 2148 2149 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2150 { 2151 /* disable misc vector(vector 0) */ 2152 hclgevf_enable_vector(&hdev->misc_vector, false); 2153 synchronize_irq(hdev->misc_vector.vector_irq); 2154 free_irq(hdev->misc_vector.vector_irq, hdev); 2155 hclgevf_free_vector(hdev, 0); 2156 } 2157 2158 static int hclgevf_init_client_instance(struct hnae3_client *client, 2159 struct hnae3_ae_dev *ae_dev) 2160 { 2161 struct hclgevf_dev *hdev = ae_dev->priv; 2162 int ret; 2163 2164 switch (client->type) { 2165 case HNAE3_CLIENT_KNIC: 2166 hdev->nic_client = client; 2167 hdev->nic.client = client; 2168 2169 ret = client->ops->init_instance(&hdev->nic); 2170 if (ret) 2171 goto clear_nic; 2172 2173 hnae3_set_client_init_flag(client, ae_dev, 1); 2174 2175 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { 2176 struct hnae3_client *rc = hdev->roce_client; 2177 2178 ret = hclgevf_init_roce_base_info(hdev); 2179 if (ret) 2180 goto clear_roce; 2181 ret = rc->ops->init_instance(&hdev->roce); 2182 if (ret) 2183 goto clear_roce; 2184 2185 hnae3_set_client_init_flag(hdev->roce_client, ae_dev, 2186 1); 2187 } 2188 break; 2189 case HNAE3_CLIENT_UNIC: 2190 hdev->nic_client = client; 2191 hdev->nic.client = client; 2192 2193 ret = client->ops->init_instance(&hdev->nic); 2194 if (ret) 2195 goto clear_nic; 2196 2197 hnae3_set_client_init_flag(client, ae_dev, 1); 2198 break; 2199 case HNAE3_CLIENT_ROCE: 2200 if (hnae3_dev_roce_supported(hdev)) { 2201 hdev->roce_client = client; 2202 hdev->roce.client = client; 2203 } 2204 2205 if (hdev->roce_client && hdev->nic_client) { 2206 ret = hclgevf_init_roce_base_info(hdev); 2207 if (ret) 2208 goto clear_roce; 2209 2210 ret = client->ops->init_instance(&hdev->roce); 2211 if (ret) 2212 goto clear_roce; 2213 } 2214 2215 hnae3_set_client_init_flag(client, ae_dev, 1); 2216 break; 2217 default: 2218 return -EINVAL; 2219 } 2220 2221 return 0; 2222 2223 clear_nic: 2224 hdev->nic_client = NULL; 2225 hdev->nic.client = NULL; 2226 return ret; 2227 clear_roce: 2228 hdev->roce_client = NULL; 2229 hdev->roce.client = NULL; 2230 return ret; 2231 } 2232 2233 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2234 struct hnae3_ae_dev *ae_dev) 2235 { 2236 struct hclgevf_dev *hdev = ae_dev->priv; 2237 2238 /* un-init roce, if it exists */ 2239 if (hdev->roce_client) { 2240 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2241 hdev->roce_client = NULL; 2242 hdev->roce.client = NULL; 2243 } 2244 2245 /* un-init nic/unic, if this was not called by roce client */ 2246 if (client->ops->uninit_instance && hdev->nic_client && 2247 client->type != HNAE3_CLIENT_ROCE) { 2248 client->ops->uninit_instance(&hdev->nic, 0); 2249 hdev->nic_client = NULL; 
2250 hdev->nic.client = NULL; 2251 } 2252 } 2253 2254 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2255 { 2256 struct pci_dev *pdev = hdev->pdev; 2257 struct hclgevf_hw *hw; 2258 int ret; 2259 2260 ret = pci_enable_device(pdev); 2261 if (ret) { 2262 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2263 return ret; 2264 } 2265 2266 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2267 if (ret) { 2268 dev_err(&pdev->dev, "can't set consistent PCI DMA mask, exiting\n"); 2269 goto err_disable_device; 2270 } 2271 2272 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2273 if (ret) { 2274 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2275 goto err_disable_device; 2276 } 2277 2278 pci_set_master(pdev); 2279 hw = &hdev->hw; 2280 hw->hdev = hdev; 2281 hw->io_base = pci_iomap(pdev, 2, 0); 2282 if (!hw->io_base) { 2283 dev_err(&pdev->dev, "can't map configuration register space\n"); 2284 ret = -ENOMEM; 2285 goto err_clr_master; 2286 } 2287 2288 return 0; 2289 2290 err_clr_master: 2291 pci_clear_master(pdev); 2292 pci_release_regions(pdev); 2293 err_disable_device: 2294 pci_disable_device(pdev); 2295 2296 return ret; 2297 } 2298 2299 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2300 { 2301 struct pci_dev *pdev = hdev->pdev; 2302 2303 pci_iounmap(pdev, hdev->hw.io_base); 2304 pci_clear_master(pdev); 2305 pci_release_regions(pdev); 2306 pci_disable_device(pdev); 2307 } 2308 2309 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2310 { 2311 struct hclgevf_query_res_cmd *req; 2312 struct hclgevf_desc desc; 2313 int ret; 2314 2315 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 2316 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2317 if (ret) { 2318 dev_err(&hdev->pdev->dev, 2319 "query vf resource failed, ret = %d.\n", ret); 2320 return ret; 2321 } 2322 2323 req = (struct hclgevf_query_res_cmd *)desc.data; 2324 2325 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { 2326 hdev->roce_base_msix_offset = 2327 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), 2328 HCLGEVF_MSIX_OFT_ROCEE_M, 2329 HCLGEVF_MSIX_OFT_ROCEE_S); 2330 hdev->num_roce_msix = 2331 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2332 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2333 2334 /* The VF should have both NIC vectors and RoCE vectors, with the NIC 2335 * vectors queued before the RoCE vectors. The offset is fixed to 64.
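 * In other words, num_msi computed below spans both ranges: roce_base_msix_offset NIC vectors followed by num_roce_msix RoCE vectors.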
2336 */ 2337 hdev->num_msi = hdev->num_roce_msix + 2338 hdev->roce_base_msix_offset; 2339 } else { 2340 hdev->num_msi = 2341 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), 2342 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2343 } 2344 2345 return 0; 2346 } 2347 2348 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2349 { 2350 struct pci_dev *pdev = hdev->pdev; 2351 int ret = 0; 2352 2353 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 2354 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2355 hclgevf_misc_irq_uninit(hdev); 2356 hclgevf_uninit_msi(hdev); 2357 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2358 } 2359 2360 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2361 pci_set_master(pdev); 2362 ret = hclgevf_init_msi(hdev); 2363 if (ret) { 2364 dev_err(&pdev->dev, 2365 "failed(%d) to init MSI/MSI-X\n", ret); 2366 return ret; 2367 } 2368 2369 ret = hclgevf_misc_irq_init(hdev); 2370 if (ret) { 2371 hclgevf_uninit_msi(hdev); 2372 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2373 ret); 2374 return ret; 2375 } 2376 2377 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2378 } 2379 2380 return ret; 2381 } 2382 2383 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2384 { 2385 struct pci_dev *pdev = hdev->pdev; 2386 int ret; 2387 2388 ret = hclgevf_pci_reset(hdev); 2389 if (ret) { 2390 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2391 return ret; 2392 } 2393 2394 ret = hclgevf_cmd_init(hdev); 2395 if (ret) { 2396 dev_err(&pdev->dev, "cmd failed %d\n", ret); 2397 return ret; 2398 } 2399 2400 ret = hclgevf_rss_init_hw(hdev); 2401 if (ret) { 2402 dev_err(&hdev->pdev->dev, 2403 "failed(%d) to initialize RSS\n", ret); 2404 return ret; 2405 } 2406 2407 ret = hclgevf_config_gro(hdev, true); 2408 if (ret) 2409 return ret; 2410 2411 ret = hclgevf_init_vlan_config(hdev); 2412 if (ret) { 2413 dev_err(&hdev->pdev->dev, 2414 "failed(%d) to initialize VLAN config\n", ret); 2415 return ret; 2416 } 2417 2418 dev_info(&hdev->pdev->dev, "Reset done\n"); 2419 2420 return 0; 2421 } 2422 2423 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 2424 { 2425 struct pci_dev *pdev = hdev->pdev; 2426 int ret; 2427 2428 ret = hclgevf_pci_init(hdev); 2429 if (ret) { 2430 dev_err(&pdev->dev, "PCI initialization failed\n"); 2431 return ret; 2432 } 2433 2434 ret = hclgevf_cmd_queue_init(hdev); 2435 if (ret) { 2436 dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret); 2437 goto err_cmd_queue_init; 2438 } 2439 2440 ret = hclgevf_cmd_init(hdev); 2441 if (ret) 2442 goto err_cmd_init; 2443 2444 /* Get vf resource */ 2445 ret = hclgevf_query_vf_resource(hdev); 2446 if (ret) { 2447 dev_err(&hdev->pdev->dev, 2448 "Query vf status error, ret = %d.\n", ret); 2449 goto err_cmd_init; 2450 } 2451 2452 ret = hclgevf_init_msi(hdev); 2453 if (ret) { 2454 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 2455 goto err_cmd_init; 2456 } 2457 2458 hclgevf_state_init(hdev); 2459 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2460 2461 ret = hclgevf_misc_irq_init(hdev); 2462 if (ret) { 2463 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2464 ret); 2465 goto err_misc_irq_init; 2466 } 2467 2468 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2469 2470 ret = hclgevf_configure(hdev); 2471 if (ret) { 2472 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2473 goto err_config; 2474 } 2475 2476 ret = hclgevf_alloc_tqps(hdev); 2477 if (ret) { 2478 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2479 goto err_config; 2480 } 2481 2482 ret = 
hclgevf_set_handle_info(hdev); 2483 if (ret) { 2484 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); 2485 goto err_config; 2486 } 2487 2488 ret = hclgevf_config_gro(hdev, true); 2489 if (ret) 2490 goto err_config; 2491 2492 /* vf is not allowed to enable unicast/multicast promisc mode. 2493 * For revision 0x20, default to disable broadcast promisc mode, 2494 * firmware makes sure broadcast packets can be accepted. 2495 * For revision 0x21, default to enable broadcast promisc mode. 2496 */ 2497 ret = hclgevf_set_promisc_mode(hdev, true); 2498 if (ret) 2499 goto err_config; 2500 2501 /* Initialize RSS for this VF */ 2502 ret = hclgevf_rss_init_hw(hdev); 2503 if (ret) { 2504 dev_err(&hdev->pdev->dev, 2505 "failed(%d) to initialize RSS\n", ret); 2506 goto err_config; 2507 } 2508 2509 ret = hclgevf_init_vlan_config(hdev); 2510 if (ret) { 2511 dev_err(&hdev->pdev->dev, 2512 "failed(%d) to initialize VLAN config\n", ret); 2513 goto err_config; 2514 } 2515 2516 hdev->last_reset_time = jiffies; 2517 pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); 2518 2519 return 0; 2520 2521 err_config: 2522 hclgevf_misc_irq_uninit(hdev); 2523 err_misc_irq_init: 2524 hclgevf_state_uninit(hdev); 2525 hclgevf_uninit_msi(hdev); 2526 err_cmd_init: 2527 hclgevf_cmd_uninit(hdev); 2528 err_cmd_queue_init: 2529 hclgevf_pci_uninit(hdev); 2530 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2531 return ret; 2532 } 2533 2534 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 2535 { 2536 hclgevf_state_uninit(hdev); 2537 2538 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2539 hclgevf_misc_irq_uninit(hdev); 2540 hclgevf_uninit_msi(hdev); 2541 } 2542 2543 hclgevf_pci_uninit(hdev); 2544 hclgevf_cmd_uninit(hdev); 2545 } 2546 2547 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 2548 { 2549 struct pci_dev *pdev = ae_dev->pdev; 2550 struct hclgevf_dev *hdev; 2551 int ret; 2552 2553 ret = hclgevf_alloc_hdev(ae_dev); 2554 if (ret) { 2555 dev_err(&pdev->dev, "hclge device allocation failed\n"); 2556 return ret; 2557 } 2558 2559 ret = hclgevf_init_hdev(ae_dev->priv); 2560 if (ret) { 2561 dev_err(&pdev->dev, "hclge device initialization failed\n"); 2562 return ret; 2563 } 2564 2565 hdev = ae_dev->priv; 2566 timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0); 2567 INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task); 2568 2569 return 0; 2570 } 2571 2572 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 2573 { 2574 struct hclgevf_dev *hdev = ae_dev->priv; 2575 2576 hclgevf_uninit_hdev(hdev); 2577 ae_dev->priv = NULL; 2578 } 2579 2580 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 2581 { 2582 struct hnae3_handle *nic = &hdev->nic; 2583 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 2584 2585 return min_t(u32, hdev->rss_size_max, 2586 hdev->num_tqps / kinfo->num_tc); 2587 } 2588 2589 /** 2590 * hclgevf_get_channels - Get the current channels enabled and max supported. 2591 * @handle: hardware information for network interface 2592 * @ch: ethtool channels structure 2593 * 2594 * We don't support separate tx and rx queues as channels. The other count 2595 * represents how many queues are being used for control. max_combined counts 2596 * how many queue pairs we can support. They may not be mapped 1 to 1 with 2597 * q_vectors since we support a lot more queue pairs than q_vectors. 
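 * Note: for this VF, other_count and max_other are reported as zero (no control queues are exposed to ethtool) and combined_count reflects the current RSS queue count.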
2598 **/ 2599 static void hclgevf_get_channels(struct hnae3_handle *handle, 2600 struct ethtool_channels *ch) 2601 { 2602 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2603 2604 ch->max_combined = hclgevf_get_max_channels(hdev); 2605 ch->other_count = 0; 2606 ch->max_other = 0; 2607 ch->combined_count = handle->kinfo.rss_size; 2608 } 2609 2610 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 2611 u16 *alloc_tqps, u16 *max_rss_size) 2612 { 2613 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2614 2615 *alloc_tqps = hdev->num_tqps; 2616 *max_rss_size = hdev->rss_size_max; 2617 } 2618 2619 static int hclgevf_get_status(struct hnae3_handle *handle) 2620 { 2621 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2622 2623 return hdev->hw.mac.link; 2624 } 2625 2626 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 2627 u8 *auto_neg, u32 *speed, 2628 u8 *duplex) 2629 { 2630 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2631 2632 if (speed) 2633 *speed = hdev->hw.mac.speed; 2634 if (duplex) 2635 *duplex = hdev->hw.mac.duplex; 2636 if (auto_neg) 2637 *auto_neg = AUTONEG_DISABLE; 2638 } 2639 2640 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 2641 u8 duplex) 2642 { 2643 hdev->hw.mac.speed = speed; 2644 hdev->hw.mac.duplex = duplex; 2645 } 2646 2647 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 2648 { 2649 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2650 2651 return hclgevf_config_gro(hdev, enable); 2652 } 2653 2654 static void hclgevf_get_media_type(struct hnae3_handle *handle, 2655 u8 *media_type) 2656 { 2657 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2658 if (media_type) 2659 *media_type = hdev->hw.mac.media_type; 2660 } 2661 2662 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 2663 { 2664 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2665 2666 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2667 } 2668 2669 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 2670 { 2671 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2672 2673 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2674 } 2675 2676 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 2677 { 2678 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2679 2680 return hdev->reset_count; 2681 } 2682 2683 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 2684 unsigned long *supported, 2685 unsigned long *advertising) 2686 { 2687 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2688 2689 *supported = hdev->hw.mac.supported; 2690 *advertising = hdev->hw.mac.advertising; 2691 } 2692 2693 #define MAX_SEPARATE_NUM 4 2694 #define SEPARATOR_VALUE 0xFFFFFFFF 2695 #define REG_NUM_PER_LINE 4 2696 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 2697 2698 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 2699 { 2700 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 2701 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2702 2703 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 2704 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 2705 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 2706 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 2707 2708 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 2709 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 2710 } 2711 2712 
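/* Dump layout note: hclgevf_get_regs() below writes each register group (cmdq, common, per-ring, per-TQP-vector) followed by SEPARATOR_VALUE padding words, so that every group fills a whole number of REG_NUM_PER_LINE-word lines and matches the length reported by hclgevf_get_regs_len() above. */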
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 2713 void *data) 2714 { 2715 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2716 int i, j, reg_um, separator_num; 2717 u32 *reg = data; 2718 2719 *version = hdev->fw_version; 2720 2721 /* fetching per-VF registers values from VF PCIe register space */ 2722 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 2723 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2724 for (i = 0; i < reg_um; i++) 2725 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 2726 for (i = 0; i < separator_num; i++) 2727 *reg++ = SEPARATOR_VALUE; 2728 2729 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 2730 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2731 for (i = 0; i < reg_um; i++) 2732 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 2733 for (i = 0; i < separator_num; i++) 2734 *reg++ = SEPARATOR_VALUE; 2735 2736 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 2737 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2738 for (j = 0; j < hdev->num_tqps; j++) { 2739 for (i = 0; i < reg_um; i++) 2740 *reg++ = hclgevf_read_dev(&hdev->hw, 2741 ring_reg_addr_list[i] + 2742 0x200 * j); 2743 for (i = 0; i < separator_num; i++) 2744 *reg++ = SEPARATOR_VALUE; 2745 } 2746 2747 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 2748 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 2749 for (j = 0; j < hdev->num_msi_used - 1; j++) { 2750 for (i = 0; i < reg_um; i++) 2751 *reg++ = hclgevf_read_dev(&hdev->hw, 2752 tqp_intr_reg_addr_list[i] + 2753 4 * j); 2754 for (i = 0; i < separator_num; i++) 2755 *reg++ = SEPARATOR_VALUE; 2756 } 2757 } 2758 2759 static const struct hnae3_ae_ops hclgevf_ops = { 2760 .init_ae_dev = hclgevf_init_ae_dev, 2761 .uninit_ae_dev = hclgevf_uninit_ae_dev, 2762 .flr_prepare = hclgevf_flr_prepare, 2763 .flr_done = hclgevf_flr_done, 2764 .init_client_instance = hclgevf_init_client_instance, 2765 .uninit_client_instance = hclgevf_uninit_client_instance, 2766 .start = hclgevf_ae_start, 2767 .stop = hclgevf_ae_stop, 2768 .client_start = hclgevf_client_start, 2769 .client_stop = hclgevf_client_stop, 2770 .map_ring_to_vector = hclgevf_map_ring_to_vector, 2771 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 2772 .get_vector = hclgevf_get_vector, 2773 .put_vector = hclgevf_put_vector, 2774 .reset_queue = hclgevf_reset_tqp, 2775 .get_mac_addr = hclgevf_get_mac_addr, 2776 .set_mac_addr = hclgevf_set_mac_addr, 2777 .add_uc_addr = hclgevf_add_uc_addr, 2778 .rm_uc_addr = hclgevf_rm_uc_addr, 2779 .add_mc_addr = hclgevf_add_mc_addr, 2780 .rm_mc_addr = hclgevf_rm_mc_addr, 2781 .get_stats = hclgevf_get_stats, 2782 .update_stats = hclgevf_update_stats, 2783 .get_strings = hclgevf_get_strings, 2784 .get_sset_count = hclgevf_get_sset_count, 2785 .get_rss_key_size = hclgevf_get_rss_key_size, 2786 .get_rss_indir_size = hclgevf_get_rss_indir_size, 2787 .get_rss = hclgevf_get_rss, 2788 .set_rss = hclgevf_set_rss, 2789 .get_rss_tuple = hclgevf_get_rss_tuple, 2790 .set_rss_tuple = hclgevf_set_rss_tuple, 2791 .get_tc_size = hclgevf_get_tc_size, 2792 .get_fw_version = hclgevf_get_fw_version, 2793 .set_vlan_filter = hclgevf_set_vlan_filter, 2794 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 2795 .reset_event = hclgevf_reset_event, 2796 .set_default_reset_request = hclgevf_set_def_reset_request, 2797 .get_channels = hclgevf_get_channels, 2798 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 2799 .get_regs_len = hclgevf_get_regs_len, 2800 
.get_regs = hclgevf_get_regs, 2801 .get_status = hclgevf_get_status, 2802 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 2803 .get_media_type = hclgevf_get_media_type, 2804 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 2805 .ae_dev_resetting = hclgevf_ae_dev_resetting, 2806 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 2807 .set_gro_en = hclgevf_gro_en, 2808 .set_mtu = hclgevf_set_mtu, 2809 .get_global_queue_id = hclgevf_get_qid_global, 2810 .set_timer_task = hclgevf_set_timer_task, 2811 .get_link_mode = hclgevf_get_link_mode, 2812 }; 2813 2814 static struct hnae3_ae_algo ae_algovf = { 2815 .ops = &hclgevf_ops, 2816 .pdev_id_table = ae_algovf_pci_tbl, 2817 }; 2818 2819 static int hclgevf_init(void) 2820 { 2821 pr_info("%s is initializing\n", HCLGEVF_NAME); 2822 2823 hnae3_register_ae_algo(&ae_algovf); 2824 2825 return 0; 2826 } 2827 2828 static void hclgevf_exit(void) 2829 { 2830 hnae3_unregister_ae_algo(&ae_algovf); 2831 } 2832 module_init(hclgevf_init); 2833 module_exit(hclgevf_exit); 2834 2835 MODULE_LICENSE("GPL"); 2836 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 2837 MODULE_DESCRIPTION("HCLGEVF Driver"); 2838 MODULE_VERSION(HCLGEVF_MOD_VERSION); 2839