// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}
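
/* Query the per-queue RX/TX packet counters from the firmware over the
 * command queue and accumulate them into the software TQP stats.
 */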
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}
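
/* The helpers below fetch VF configuration (port based VLAN state, queue
 * count and depth, global queue id and media type) from the PF over the
 * VF-to-PF mailbox channel.
 */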
static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	u8 resp_msg;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				   HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
				   NULL, 0, true, &resp_msg, sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0,
				   true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2], resp_data[2];
	u16 qid_in_pf = 0;
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
				   sizeof(msg_data), true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	u8 resp_msg[2];
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
				   true, resp_msg, sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
				 i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
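
/* Set up the knic private info: the number of usable TQPs is the RSS size
 * (capped by what the PF granted) times the number of enabled TCs.
 */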
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(resp_msg));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1
	u8 send_msg;
	u8 resp_msg;

	send_msg = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
			     &send_msg, sizeof(send_msg), false,
			     &resp_msg, sizeof(resp_msg));
	send_msg = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
			     &send_msg, sizeof(send_msg), false,
			     &resp_msg, sizeof(resp_msg));
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}
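
/* Allocate up to @vector_num interrupt vectors for the nic client, skipping
 * vector 0 which is reserved for misc (mailbox/reset) events. Returns the
 * number of vectors actually allocated.
 */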
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclgevf_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}
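
/* Configure the per-TC RSS mode: tc_size is the log2 of the rounded-up RSS
 * size, and tc_offset is the first queue of each TC's slice.
 */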
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	unsigned int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

/* for revision 0x20, the VF shares the same RSS config as the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8

	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0,
					   &index, sizeof(index),
					   true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* update the shadow RSS key with the user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
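
/* Build and send the mailbox messages that map (or unmap) a chain of TQP
 * rings onto an interrupt vector. Ring entries are batched so that each
 * mailbox message carries as many ring nodes as its payload allows.
 */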
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
	type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector = %d, ret = %d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_bc_pmc ? 1 : 0;
	req->msg[2] = en_uc_pmc ? 1 : 0;
	req->msg[3] = en_mc_pmc ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct pci_dev *pdev = hdev->pdev;
	bool en_bc_pmc;

	en_bc_pmc = pdev->revision != 0x20;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	u8 host_mac[ETH_ALEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MAC_ADDR, 0, NULL, 0,
				      true, host_mac, ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
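
/* Change the VF MAC address via the PF. Both the old and the new address
 * are sent so the PF can validate the request; a first-time set uses the
 * UC_ADD subcode, later changes use UC_MODIFY.
 */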
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
		HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, sizeof(msg_data),
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN	5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting, firmware is unable to handle the
	 * mailbox. Just record the vlan id, and remove it after the reset
	 * has finished.
	 */
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				   HCLGE_MBX_VLAN_FILTER, msg_data,
				   HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);

	/* when removing the hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to stay consistent
	 * with the stack.
	 */
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(msg_data, &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    sizeof(msg_data), true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev,
			"flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
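
/* Wait for hardware to finish the requested reset: poll the FLR done bit
 * for FLR, or the relevant reset-in-progress register otherwise.
 */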
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);
	else if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by the PF. Yes,
	 * this also means we might end up waiting a bit more even for a VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	return 0;
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		hdev->rst_stats.vf_func_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		hdev->rst_stats.flr_rst_cnt++;
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fails */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	}
}
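
/* Core VF reset sequence: bring the nic client down, complete the
 * preparatory handshake with the PF/firmware, wait for hardware reset
 * completion, then rebuild the stack and bring the client back up.
 */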
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize the ae_dev reset status as well, in case the enet layer
	 * wants to know if the device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->rst_stats.rst_cnt++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if the VF could successfully fetch the hardware reset
	 * completion status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	hdev->rst_stats.hw_rst_done_cnt++;

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	hclgevf_reset_err_handle(hdev);

	return ret;
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies +
		  HCLGEVF_GENERAL_TASK_INTERVAL * HZ);

	hdev->stats_timer++;
	hclgevf_task_schedule(hdev);
}
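
/* Reset service task: handles a hardware-initiated reset that is already
 * pending, or escalates a requested reset level after repeated failures.
 */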
static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of the below happens:
		 * 1. reset was initiated due to a watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. a problem in TX due to another lower layer (for
		 *       example the link layer not functioning properly
		 *       etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the response of the PF for this reset
		 * request. PF will ack the 1b and 2 cases, but we will not
		 * get any intimation about 1a from the PF as the cmdq would
		 * be in an unreliable state, i.e. mailbox communication
		 * between PF and VF would be broken.
		 *
		 * if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to
		 *    IMP reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try to
		 * reset our PCIe + stack and see if it alleviates the
		 * problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for a full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies +
		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
		return;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(respmsg));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}
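
/* Periodic service task: refreshes TQP stats, link status and link mode,
 * retries failed VLAN deletions and kicks any deferred work.
 */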
static void hclgevf_service_task(struct work_struct *work)
{
	struct hnae3_handle *handle;
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);
	handle = &hdev->nic;

	if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
		hclgevf_tqps_update_stats(handle);
		hdev->stats_timer = 0;
	}

	/* request the link status from the PF. The PF should be able to push
	 * such updates to the VF in future, so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 val, cmdq_stat_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
					 HCLGEVF_VECTOR0_CMDQ_STAT_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
		hdev->rst_stats.vf_rst_cnt++;
		/* set up the VF hardware reset status; the PF will clear
		 * this status once it has finished initializing.
		 */
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
				  val | HCLGEVF_VF_RST_ING_BIT);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		/* for revision 0x21, an interrupt is cleared by writing 0 to
		 * its bit in the clear register; writing 1 keeps the old
		 * value.
		 * for revision 0x20, the clear register is a read & write
		 * register, so we should just write 0 to the bit we are
		 * handling, and keep the other bits as cmdq_stat_reg.
		 */
		if (hdev->pdev->revision >= 0x21)
			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		else
			*clearval = cmdq_stat_reg &
				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);

		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}
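
/* Misc (vector 0) interrupt handler: masks the vector, demultiplexes the
 * event source (reset vs. mailbox), then clears the cause and unmasks.
 */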
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

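/* Note on hclgevf_misc_irq_handle() above: the vector is masked on entry
 * and unmasked again only for recognized events. For
 * HCLGEVF_VECTOR0_EVENT_OTHER the source register is not cleared and the
 * vector stays masked, so an unknown latched source cannot keep
 * re-firing; nothing on this path re-enables the vector afterwards.
 */
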
static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	ret = hclgevf_get_pf_media_type(hdev);
	if (ret)
		return ret;

	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;
	u32 i;

	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
		       HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
}

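/* Example of the indirection table fill in hclgevf_rss_init_hw() above
 * (table size illustrative): with rss_size = 4 and a 512-entry table,
 * rss_indirection_tbl becomes 0, 1, 2, 3, 0, 1, 2, 3, ..., i.e. hash
 * buckets are spread round-robin across the 4 RX queues, which keeps
 * flows uniformly distributed until the user overrides the table.
 */
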
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->reset_type != HNAE3_VF_RESET)
		for (i = 0; i < handle->kinfo.num_tqps; i++)
			if (hclgevf_reset_tqp(handle, i))
				break;

	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = alive ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
				    0, &msg_data, 1, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, true);
	if (ret)
		return ret;

	mod_timer(&hdev->keep_alive_timer, jiffies +
		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);

	return 0;
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);

	del_timer_sync(&hdev->keep_alive_timer);
	cancel_work_sync(&hdev->keep_alive_task);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

	if (hdev->keep_alive_timer.function)
		del_timer_sync(&hdev->keep_alive_timer);
	if (hdev->keep_alive_task.func)
		cancel_work_sync(&hdev->keep_alive_task);
	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

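/* Bookkeeping set up by hclgevf_init_msi() above: vector_status[i]
 * records the vport owning MSI-X entry i, with HCLGEVF_INVALID_VPORT
 * marking a free entry, and vector_irq[i] is meant to hold the Linux
 * IRQ number once the entry is claimed (an assumption based on how the
 * arrays are sized and initialized here). Right after init with
 * num_msi = 4 (illustrative):
 *
 *	vector_status = { INVALID, INVALID, INVALID, INVALID }
 *	vector_irq    = { 0, 0, 0, 0 }	(kcalloc zero-fills)
 */
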
static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %d\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}

static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					    struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	ret = client->ops->init_instance(&hdev->nic);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->nic))
		hclgevf_info_show(hdev);

	return 0;
}

static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}

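/* Note: hclgevf_init_roce_client_instance() above returns early unless
 * both the NIC and the ROCE clients have registered, so the ROCE
 * instance is effectively initialized by whichever of the two
 * registrations happens last; hclgevf_init_client_instance() below
 * therefore calls it from both the KNIC and the ROCE paths.
 */
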
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = hclgevf_init_nic_client_instance(ae_dev, client);
		if (ret)
			goto clear_nic;

		ret = hclgevf_init_roce_client_instance(ae_dev,
							hdev->roce_client);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		ret = hclgevf_init_roce_client_instance(ae_dev, client);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

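/* hclgevf_pci_init() above follows the usual kernel unwind idiom: each
 * failure jumps to a label that undoes only the steps which already
 * succeeded, in reverse order (iomap failure -> clear bus mastering ->
 * release regions -> disable device). hclgevf_pci_uninit() below mirrors
 * the same sequence for a fully initialized device.
 */
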
static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and RoCE vectors, with the NIC
		 * vectors queued before the RoCE vectors. The offset is
		 * fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

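/* Example vector layout derived by hclgevf_query_vf_resource() above on
 * a RoCE-capable VF (numbers illustrative): with
 * roce_base_msix_offset = 64 and num_roce_msix = 8, num_msi becomes 72;
 * entries 0..63 then serve the NIC (entry 0 being the misc vector) and
 * entries 64..71 are handed to the RoCE client.
 */
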
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd init failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query VF resource, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
		 HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclgevf device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclgevf device initialization failed\n");
		return ret;
	}

	hdev = ae_dev->priv;
	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->num_tc);
}

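/* Example for hclgevf_get_max_channels() above (numbers illustrative):
 * with rss_size_max = 16, num_tqps = 8 and num_tc = 2 the result is
 * min(16, 8 / 2) = 4 combined channels, i.e. the queue pairs are shared
 * evenly among the TCs and further capped by the RSS limit.
 */
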
/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 */
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static void hclgevf_update_rss_size(struct hnae3_handle *handle,
				    u32 new_tqps_num)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u16 max_rss_size;

	kinfo->req_rss_size = new_tqps_num;

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     hdev->num_tqps / kinfo->num_tc);

	/* Use the user's configuration when it is not larger than
	 * max_rss_size, otherwise, use the maximum specification value.
	 */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size)
		kinfo->rss_size = kinfo->req_rss_size;
	else if (kinfo->rss_size > max_rss_size ||
		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
		kinfo->rss_size = max_rss_size;

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
}

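/* Worked examples for hclgevf_update_rss_size() above (values
 * illustrative, with max_rss_size = 8):
 * - current rss_size 4, user requests 6: 6 <= 8, so rss_size becomes 6;
 * - user requests 12: above the cap, so rss_size is left unchanged;
 * - no request (req_rss_size == 0) and current rss_size 4: grown to the
 *   cap, 8.
 * num_tqps is then recomputed as num_tc * rss_size.
 */
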
static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
				bool rxfh_configured)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	hclgevf_update_rss_size(handle, new_tqps_num);

	ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by the user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the RSS indirection table according to the new
	 * RSS size.
	 */
	rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				   u8 *module_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

#define MAX_SEPARATE_NUM 4
#define SEPARATOR_VALUE 0xFFFFFFFF
#define REG_NUM_PER_LINE 4
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

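/* Worked example for hclgevf_get_regs_len() above, using the 14-entry
 * cmdq_reg_addr_list at the top of this file: 14 u32 values occupy 56
 * bytes, 56 / REG_LEN_PER_LINE = 3, plus 1 gives 4 lines, i.e. 64 bytes
 * reserved. hclgevf_get_regs() below then emits the 14 register values
 * followed by 4 - (14 % 4) = 2 separator words: 16 words = 64 bytes, so
 * the dump exactly fills the space reserved for that block.
 */
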
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_num, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetch per-VF register values from the VF PCIe register space */
	reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();

	/* send msg to PF and wait for the port based VLAN info to be
	 * updated there
	 */
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
			     HCLGE_MBX_PORT_BASE_VLAN_CFG,
			     port_base_vlan_info, data_size,
			     false, NULL, 0);

	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

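/* Note on hclgevf_update_port_base_vlan_info() above: the NIC client is
 * quiesced (HNAE3_DOWN_CLIENT) under the RTNL lock before the new port
 * based VLAN info is pushed to the PF, and brought back up the same way
 * afterwards. The mailbox message is sent without waiting for a response
 * (the "false" argument), so its return value is not checked here.
 */
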
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.set_channels = hclgevf_set_channels,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
	.set_promisc_mode = hclgevf_set_promisc_mode,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);