1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 #define HCLGEVF_RESET_MAX_FAIL_CNT 5 15 16 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 17 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 18 unsigned long delay); 19 20 static struct hnae3_ae_algo ae_algovf; 21 22 static struct workqueue_struct *hclgevf_wq; 23 24 static const struct pci_device_id ae_algovf_pci_tbl[] = { 25 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 26 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 27 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 28 /* required last entry */ 29 {0, } 30 }; 31 32 static const u8 hclgevf_hash_key[] = { 33 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 34 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 35 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 36 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 37 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 38 }; 39 40 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 41 42 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 43 HCLGEVF_CMDQ_TX_ADDR_H_REG, 44 HCLGEVF_CMDQ_TX_DEPTH_REG, 45 HCLGEVF_CMDQ_TX_TAIL_REG, 46 HCLGEVF_CMDQ_TX_HEAD_REG, 47 HCLGEVF_CMDQ_RX_ADDR_L_REG, 48 HCLGEVF_CMDQ_RX_ADDR_H_REG, 49 HCLGEVF_CMDQ_RX_DEPTH_REG, 50 HCLGEVF_CMDQ_RX_TAIL_REG, 51 HCLGEVF_CMDQ_RX_HEAD_REG, 52 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 53 HCLGEVF_VECTOR0_CMDQ_STATE_REG, 54 HCLGEVF_CMDQ_INTR_EN_REG, 55 HCLGEVF_CMDQ_INTR_GEN_REG}; 56 57 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 58 HCLGEVF_RST_ING, 59 HCLGEVF_GRO_EN_REG}; 60 61 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 62 HCLGEVF_RING_RX_ADDR_H_REG, 63 HCLGEVF_RING_RX_BD_NUM_REG, 64 HCLGEVF_RING_RX_BD_LENGTH_REG, 65 HCLGEVF_RING_RX_MERGE_EN_REG, 66 HCLGEVF_RING_RX_TAIL_REG, 67 HCLGEVF_RING_RX_HEAD_REG, 68 HCLGEVF_RING_RX_FBD_NUM_REG, 69 HCLGEVF_RING_RX_OFFSET_REG, 70 HCLGEVF_RING_RX_FBD_OFFSET_REG, 71 HCLGEVF_RING_RX_STASH_REG, 72 HCLGEVF_RING_RX_BD_ERR_REG, 73 HCLGEVF_RING_TX_ADDR_L_REG, 74 HCLGEVF_RING_TX_ADDR_H_REG, 75 HCLGEVF_RING_TX_BD_NUM_REG, 76 HCLGEVF_RING_TX_PRIORITY_REG, 77 HCLGEVF_RING_TX_TC_REG, 78 HCLGEVF_RING_TX_MERGE_EN_REG, 79 HCLGEVF_RING_TX_TAIL_REG, 80 HCLGEVF_RING_TX_HEAD_REG, 81 HCLGEVF_RING_TX_FBD_NUM_REG, 82 HCLGEVF_RING_TX_OFFSET_REG, 83 HCLGEVF_RING_TX_EBD_NUM_REG, 84 HCLGEVF_RING_TX_EBD_OFFSET_REG, 85 HCLGEVF_RING_TX_BD_ERR_REG, 86 HCLGEVF_RING_EN_REG}; 87 88 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 89 HCLGEVF_TQP_INTR_GL0_REG, 90 HCLGEVF_TQP_INTR_GL1_REG, 91 HCLGEVF_TQP_INTR_GL2_REG, 92 HCLGEVF_TQP_INTR_RL_REG}; 93 94 static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 95 { 96 if (!handle->client) 97 return container_of(handle, struct hclgevf_dev, nic); 98 else if (handle->client->type == HNAE3_CLIENT_ROCE) 99 return container_of(handle, struct hclgevf_dev, roce); 100 else 101 return container_of(handle, struct hclgevf_dev, nic); 102 } 103 104 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 105 { 106 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 107 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 108 struct hclgevf_desc desc; 109 struct hclgevf_tqp *tqp; 110 int status; 111 int i; 112 113 for (i = 0; i < kinfo->num_tqps; i++) { 114 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 115 hclgevf_cmd_setup_basic_desc(&desc, 116 HCLGEVF_OPC_QUERY_RX_STATUS, 117 true); 118 119 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 120 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 121 if (status) { 122 dev_err(&hdev->pdev->dev, 123 "Query tqp stat fail, status = %d,queue = %d\n", 124 status, i); 125 return status; 126 } 127 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 128 le32_to_cpu(desc.data[1]); 129 130 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 131 true); 132 133 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 134 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 135 if (status) { 136 dev_err(&hdev->pdev->dev, 137 "Query tqp stat fail, status = %d,queue = %d\n", 138 status, i); 139 return status; 140 } 141 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 142 le32_to_cpu(desc.data[1]); 143 } 144 145 return 0; 146 } 147 148 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 149 { 150 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 151 struct hclgevf_tqp *tqp; 152 u64 *buff = data; 153 int i; 154 155 for (i = 0; i < kinfo->num_tqps; i++) { 156 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 157 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 158 } 159 for (i = 0; i < kinfo->num_tqps; i++) { 160 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 161 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 162 } 163 164 return buff; 165 } 166 167 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 168 { 169 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 170 171 return kinfo->num_tqps * 2; 172 } 173 174 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 175 { 176 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 177 u8 *buff = data; 178 int i; 179 180 for (i = 0; i < kinfo->num_tqps; i++) { 181 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 182 struct hclgevf_tqp, q); 183 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", 184 tqp->index); 185 buff += ETH_GSTRING_LEN; 186 } 187 188 for (i = 0; i < kinfo->num_tqps; i++) { 189 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 190 struct hclgevf_tqp, q); 191 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", 192 tqp->index); 193 buff += ETH_GSTRING_LEN; 194 } 195 196 return buff; 197 } 198 199 static void hclgevf_update_stats(struct hnae3_handle *handle, 200 struct net_device_stats *net_stats) 201 { 202 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 203 int status; 204 205 status = hclgevf_tqps_update_stats(handle); 206 if (status) 207 dev_err(&hdev->pdev->dev, 208 "VF update of TQPS stats fail, status = %d.\n", 209 status); 210 } 211 212 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 213 { 214 if (strset == ETH_SS_TEST) 215 return -EOPNOTSUPP; 216 else if (strset == ETH_SS_STATS) 217 return hclgevf_tqps_get_sset_count(handle, strset); 218 219 return 0; 220 } 221 222 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 223 u8 *data) 224 { 225 u8 *p = (char *)data; 226 227 if (strset == ETH_SS_STATS) 228 p = hclgevf_tqps_get_strings(handle, p); 229 } 230 231 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 232 { 233 hclgevf_tqps_get_stats(handle, data); 234 } 235 236 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, 237 u8 subcode) 238 { 239 if (msg) { 240 memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); 241 msg->code = code; 242 msg->subcode = subcode; 243 } 244 } 245 246 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 247 { 248 struct hclge_vf_to_pf_msg send_msg; 249 u8 resp_msg; 250 int status; 251 252 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0); 253 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 254 sizeof(resp_msg)); 255 if (status) { 256 dev_err(&hdev->pdev->dev, 257 "VF request to get TC info from PF failed %d", 258 status); 259 return status; 260 } 261 262 hdev->hw_tc_map = resp_msg; 263 264 return 0; 265 } 266 267 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 268 { 269 struct hnae3_handle *nic = &hdev->nic; 270 struct hclge_vf_to_pf_msg send_msg; 271 u8 resp_msg; 272 int ret; 273 274 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 275 HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); 276 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 277 sizeof(u8)); 278 if (ret) { 279 dev_err(&hdev->pdev->dev, 280 "VF request to get port based vlan state failed %d", 281 ret); 282 return ret; 283 } 284 285 nic->port_base_vlan_state = resp_msg; 286 287 return 0; 288 } 289 290 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 291 { 292 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 293 #define HCLGEVF_TQPS_ALLOC_OFFSET 0 294 #define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 295 #define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 296 297 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 298 struct hclge_vf_to_pf_msg send_msg; 299 int status; 300 301 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); 302 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 303 HCLGEVF_TQPS_RSS_INFO_LEN); 304 if (status) { 305 dev_err(&hdev->pdev->dev, 306 "VF request to get tqp info from PF failed %d", 307 status); 308 return status; 309 } 310 311 memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], 312 sizeof(u16)); 313 memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], 314 sizeof(u16)); 315 memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], 316 sizeof(u16)); 317 318 return 0; 319 } 320 321 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 322 { 323 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 324 #define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 325 #define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 326 327 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 328 struct hclge_vf_to_pf_msg send_msg; 329 int ret; 330 331 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); 332 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 333 HCLGEVF_TQPS_DEPTH_INFO_LEN); 334 if (ret) { 335 dev_err(&hdev->pdev->dev, 336 "VF request to get tqp depth info from PF failed %d", 337 ret); 338 return ret; 339 } 340 341 memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], 342 sizeof(u16)); 343 memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], 344 sizeof(u16)); 345 346 return 0; 347 } 348 349 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 350 { 351 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 352 struct hclge_vf_to_pf_msg send_msg; 353 u16 qid_in_pf = 0; 354 u8 resp_data[2]; 355 int ret; 356 357 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); 358 memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 359 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, 360 sizeof(resp_data)); 361 if (!ret) 362 qid_in_pf = *(u16 *)resp_data; 363 364 return qid_in_pf; 365 } 366 367 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 368 { 369 struct hclge_vf_to_pf_msg send_msg; 370 u8 resp_msg[2]; 371 int ret; 372 373 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); 374 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 375 sizeof(resp_msg)); 376 if (ret) { 377 dev_err(&hdev->pdev->dev, 378 "VF request to get the pf port media type failed %d", 379 ret); 380 return ret; 381 } 382 383 hdev->hw.mac.media_type = resp_msg[0]; 384 hdev->hw.mac.module_type = resp_msg[1]; 385 386 return 0; 387 } 388 389 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 390 { 391 struct hclgevf_tqp *tqp; 392 int i; 393 394 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 395 sizeof(struct hclgevf_tqp), GFP_KERNEL); 396 if (!hdev->htqp) 397 return -ENOMEM; 398 399 tqp = hdev->htqp; 400 401 for (i = 0; i < hdev->num_tqps; i++) { 402 tqp->dev = &hdev->pdev->dev; 403 tqp->index = i; 404 405 tqp->q.ae_algo = &ae_algovf; 406 tqp->q.buf_size = hdev->rx_buf_len; 407 tqp->q.tx_desc_num = hdev->num_tx_desc; 408 tqp->q.rx_desc_num = hdev->num_rx_desc; 409 410 /* need an extended offset to configure queues >= 411 * HCLGEVF_TQP_MAX_SIZE_DEV_V2. 412 */ 413 if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) 414 tqp->q.io_base = hdev->hw.io_base + 415 HCLGEVF_TQP_REG_OFFSET + 416 i * HCLGEVF_TQP_REG_SIZE; 417 else 418 tqp->q.io_base = hdev->hw.io_base + 419 HCLGEVF_TQP_REG_OFFSET + 420 HCLGEVF_TQP_EXT_REG_OFFSET + 421 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * 422 HCLGEVF_TQP_REG_SIZE; 423 424 tqp++; 425 } 426 427 return 0; 428 } 429 430 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 431 { 432 struct hnae3_handle *nic = &hdev->nic; 433 struct hnae3_knic_private_info *kinfo; 434 u16 new_tqps = hdev->num_tqps; 435 unsigned int i; 436 u8 num_tc = 0; 437 438 kinfo = &nic->kinfo; 439 kinfo->num_tx_desc = hdev->num_tx_desc; 440 kinfo->num_rx_desc = hdev->num_rx_desc; 441 kinfo->rx_buf_len = hdev->rx_buf_len; 442 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 443 if (hdev->hw_tc_map & BIT(i)) 444 num_tc++; 445 446 num_tc = num_tc ? num_tc : 1; 447 kinfo->tc_info.num_tc = num_tc; 448 kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); 449 new_tqps = kinfo->rss_size * num_tc; 450 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 451 452 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 453 sizeof(struct hnae3_queue *), GFP_KERNEL); 454 if (!kinfo->tqp) 455 return -ENOMEM; 456 457 for (i = 0; i < kinfo->num_tqps; i++) { 458 hdev->htqp[i].q.handle = &hdev->nic; 459 hdev->htqp[i].q.tqp_index = i; 460 kinfo->tqp[i] = &hdev->htqp[i].q; 461 } 462 463 /* after init the max rss_size and tqps, adjust the default tqp numbers 464 * and rss size with the actual vector numbers 465 */ 466 kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 467 kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, 468 kinfo->rss_size); 469 470 return 0; 471 } 472 473 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 474 { 475 struct hclge_vf_to_pf_msg send_msg; 476 int status; 477 478 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); 479 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 480 if (status) 481 dev_err(&hdev->pdev->dev, 482 "VF failed to fetch link status(%d) from PF", status); 483 } 484 485 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 486 { 487 struct hnae3_handle *rhandle = &hdev->roce; 488 struct hnae3_handle *handle = &hdev->nic; 489 struct hnae3_client *rclient; 490 struct hnae3_client *client; 491 492 if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 493 return; 494 495 client = handle->client; 496 rclient = hdev->roce_client; 497 498 link_state = 499 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 500 501 if (link_state != hdev->hw.mac.link) { 502 client->ops->link_status_change(handle, !!link_state); 503 if (rclient && rclient->ops->link_status_change) 504 rclient->ops->link_status_change(rhandle, !!link_state); 505 hdev->hw.mac.link = link_state; 506 } 507 508 clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 509 } 510 511 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 512 { 513 #define HCLGEVF_ADVERTISING 0 514 #define HCLGEVF_SUPPORTED 1 515 516 struct hclge_vf_to_pf_msg send_msg; 517 518 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 519 send_msg.data[0] = HCLGEVF_ADVERTISING; 520 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 521 send_msg.data[0] = HCLGEVF_SUPPORTED; 522 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 523 } 524 525 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 526 { 527 struct hnae3_handle *nic = &hdev->nic; 528 int ret; 529 530 nic->ae_algo = &ae_algovf; 531 nic->pdev = hdev->pdev; 532 nic->numa_node_mask = hdev->numa_node_mask; 533 nic->flags |= HNAE3_SUPPORT_VF; 534 535 ret = hclgevf_knic_setup(hdev); 536 if (ret) 537 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 538 ret); 539 return ret; 540 } 541 542 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 543 { 544 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 545 dev_warn(&hdev->pdev->dev, 546 "vector(vector_id %d) has been freed.\n", vector_id); 547 return; 548 } 549 550 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 551 hdev->num_msi_left += 1; 552 hdev->num_msi_used -= 1; 553 } 554 555 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 556 struct hnae3_vector_info *vector_info) 557 { 558 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 559 struct hnae3_vector_info *vector = vector_info; 560 int alloc = 0; 561 int i, j; 562 563 vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 564 vector_num = min(hdev->num_msi_left, vector_num); 565 566 for (j = 0; j < vector_num; j++) { 567 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 568 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 569 vector->vector = pci_irq_vector(hdev->pdev, i); 570 vector->io_addr = hdev->hw.io_base + 571 HCLGEVF_VECTOR_REG_BASE + 572 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 573 hdev->vector_status[i] = 0; 574 hdev->vector_irq[i] = vector->vector; 575 576 vector++; 577 alloc++; 578 579 break; 580 } 581 } 582 } 583 hdev->num_msi_left -= alloc; 584 hdev->num_msi_used += alloc; 585 586 return alloc; 587 } 588 589 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 590 { 591 int i; 592 593 for (i = 0; i < hdev->num_msi; i++) 594 if (vector == hdev->vector_irq[i]) 595 return i; 596 597 return -EINVAL; 598 } 599 600 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 601 const u8 hfunc, const u8 *key) 602 { 603 struct hclgevf_rss_config_cmd *req; 604 unsigned int key_offset = 0; 605 struct hclgevf_desc desc; 606 int key_counts; 607 int key_size; 608 int ret; 609 610 key_counts = HCLGEVF_RSS_KEY_SIZE; 611 req = (struct hclgevf_rss_config_cmd *)desc.data; 612 613 while (key_counts) { 614 hclgevf_cmd_setup_basic_desc(&desc, 615 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 616 false); 617 618 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 619 req->hash_config |= 620 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 621 622 key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); 623 memcpy(req->hash_key, 624 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 625 626 key_counts -= key_size; 627 key_offset++; 628 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 629 if (ret) { 630 dev_err(&hdev->pdev->dev, 631 "Configure RSS config fail, status = %d\n", 632 ret); 633 return ret; 634 } 635 } 636 637 return 0; 638 } 639 640 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 641 { 642 return HCLGEVF_RSS_KEY_SIZE; 643 } 644 645 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 646 { 647 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 648 struct hclgevf_rss_indirection_table_cmd *req; 649 struct hclgevf_desc desc; 650 int rss_cfg_tbl_num; 651 int status; 652 int i, j; 653 654 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 655 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size / 656 HCLGEVF_RSS_CFG_TBL_SIZE; 657 658 for (i = 0; i < rss_cfg_tbl_num; i++) { 659 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 660 false); 661 req->start_table_index = 662 cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE); 663 req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK); 664 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 665 req->rss_result[j] = 666 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 667 668 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 669 if (status) { 670 dev_err(&hdev->pdev->dev, 671 "VF failed(=%d) to set RSS indirection table\n", 672 status); 673 return status; 674 } 675 } 676 677 return 0; 678 } 679 680 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 681 { 682 struct hclgevf_rss_tc_mode_cmd *req; 683 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 684 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 685 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 686 struct hclgevf_desc desc; 687 u16 roundup_size; 688 unsigned int i; 689 int status; 690 691 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 692 693 roundup_size = roundup_pow_of_two(rss_size); 694 roundup_size = ilog2(roundup_size); 695 696 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 697 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 698 tc_size[i] = roundup_size; 699 tc_offset[i] = rss_size * i; 700 } 701 702 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 703 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 704 u16 mode = 0; 705 706 hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B, 707 (tc_valid[i] & 0x1)); 708 hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M, 709 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 710 hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M, 711 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 712 713 req->rss_tc_mode[i] = cpu_to_le16(mode); 714 } 715 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 716 if (status) 717 dev_err(&hdev->pdev->dev, 718 "VF failed(=%d) to set rss tc mode\n", status); 719 720 return status; 721 } 722 723 /* for revision 0x20, vf shared the same rss config with pf */ 724 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 725 { 726 #define HCLGEVF_RSS_MBX_RESP_LEN 8 727 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 728 u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 729 struct hclge_vf_to_pf_msg send_msg; 730 u16 msg_num, hash_key_index; 731 u8 index; 732 int ret; 733 734 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 735 msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 736 HCLGEVF_RSS_MBX_RESP_LEN; 737 for (index = 0; index < msg_num; index++) { 738 send_msg.data[0] = index; 739 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 740 HCLGEVF_RSS_MBX_RESP_LEN); 741 if (ret) { 742 dev_err(&hdev->pdev->dev, 743 "VF get rss hash key from PF failed, ret=%d", 744 ret); 745 return ret; 746 } 747 748 hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 749 if (index == msg_num - 1) 750 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 751 &resp_msg[0], 752 HCLGEVF_RSS_KEY_SIZE - hash_key_index); 753 else 754 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 755 &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 756 } 757 758 return 0; 759 } 760 761 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 762 u8 *hfunc) 763 { 764 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 765 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 766 int i, ret; 767 768 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 769 /* Get hash algorithm */ 770 if (hfunc) { 771 switch (rss_cfg->hash_algo) { 772 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 773 *hfunc = ETH_RSS_HASH_TOP; 774 break; 775 case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 776 *hfunc = ETH_RSS_HASH_XOR; 777 break; 778 default: 779 *hfunc = ETH_RSS_HASH_UNKNOWN; 780 break; 781 } 782 } 783 784 /* Get the RSS Key required by the user */ 785 if (key) 786 memcpy(key, rss_cfg->rss_hash_key, 787 HCLGEVF_RSS_KEY_SIZE); 788 } else { 789 if (hfunc) 790 *hfunc = ETH_RSS_HASH_TOP; 791 if (key) { 792 ret = hclgevf_get_rss_hash_key(hdev); 793 if (ret) 794 return ret; 795 memcpy(key, rss_cfg->rss_hash_key, 796 HCLGEVF_RSS_KEY_SIZE); 797 } 798 } 799 800 if (indir) 801 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 802 indir[i] = rss_cfg->rss_indirection_tbl[i]; 803 804 return 0; 805 } 806 807 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 808 const u8 *key, const u8 hfunc) 809 { 810 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 811 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 812 int ret, i; 813 814 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 815 /* Set the RSS Hash Key if specififed by the user */ 816 if (key) { 817 switch (hfunc) { 818 case ETH_RSS_HASH_TOP: 819 rss_cfg->hash_algo = 820 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 821 break; 822 case ETH_RSS_HASH_XOR: 823 rss_cfg->hash_algo = 824 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 825 break; 826 case ETH_RSS_HASH_NO_CHANGE: 827 break; 828 default: 829 return -EINVAL; 830 } 831 832 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 833 key); 834 if (ret) 835 return ret; 836 837 /* Update the shadow RSS key with user specified qids */ 838 memcpy(rss_cfg->rss_hash_key, key, 839 HCLGEVF_RSS_KEY_SIZE); 840 } 841 } 842 843 /* update the shadow RSS table with user specified qids */ 844 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 845 rss_cfg->rss_indirection_tbl[i] = indir[i]; 846 847 /* update the hardware */ 848 return hclgevf_set_rss_indir_table(hdev); 849 } 850 851 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 852 { 853 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 854 855 if (nfc->data & RXH_L4_B_2_3) 856 hash_sets |= HCLGEVF_D_PORT_BIT; 857 else 858 hash_sets &= ~HCLGEVF_D_PORT_BIT; 859 860 if (nfc->data & RXH_IP_SRC) 861 hash_sets |= HCLGEVF_S_IP_BIT; 862 else 863 hash_sets &= ~HCLGEVF_S_IP_BIT; 864 865 if (nfc->data & RXH_IP_DST) 866 hash_sets |= HCLGEVF_D_IP_BIT; 867 else 868 hash_sets &= ~HCLGEVF_D_IP_BIT; 869 870 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 871 hash_sets |= HCLGEVF_V_TAG_BIT; 872 873 return hash_sets; 874 } 875 876 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 877 struct ethtool_rxnfc *nfc) 878 { 879 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 880 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 881 struct hclgevf_rss_input_tuple_cmd *req; 882 struct hclgevf_desc desc; 883 u8 tuple_sets; 884 int ret; 885 886 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 887 return -EOPNOTSUPP; 888 889 if (nfc->data & 890 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 891 return -EINVAL; 892 893 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 894 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 895 896 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 897 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 898 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 899 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 900 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 901 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 902 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 903 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 904 905 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 906 switch (nfc->flow_type) { 907 case TCP_V4_FLOW: 908 req->ipv4_tcp_en = tuple_sets; 909 break; 910 case TCP_V6_FLOW: 911 req->ipv6_tcp_en = tuple_sets; 912 break; 913 case UDP_V4_FLOW: 914 req->ipv4_udp_en = tuple_sets; 915 break; 916 case UDP_V6_FLOW: 917 req->ipv6_udp_en = tuple_sets; 918 break; 919 case SCTP_V4_FLOW: 920 req->ipv4_sctp_en = tuple_sets; 921 break; 922 case SCTP_V6_FLOW: 923 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && 924 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))) 925 return -EINVAL; 926 927 req->ipv6_sctp_en = tuple_sets; 928 break; 929 case IPV4_FLOW: 930 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 931 break; 932 case IPV6_FLOW: 933 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 934 break; 935 default: 936 return -EINVAL; 937 } 938 939 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 940 if (ret) { 941 dev_err(&hdev->pdev->dev, 942 "Set rss tuple fail, status = %d\n", ret); 943 return ret; 944 } 945 946 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 947 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 948 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 949 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 950 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 951 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 952 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 953 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 954 return 0; 955 } 956 957 static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev, 958 int flow_type, u8 *tuple_sets) 959 { 960 switch (flow_type) { 961 case TCP_V4_FLOW: 962 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en; 963 break; 964 case UDP_V4_FLOW: 965 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en; 966 break; 967 case TCP_V6_FLOW: 968 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en; 969 break; 970 case UDP_V6_FLOW: 971 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en; 972 break; 973 case SCTP_V4_FLOW: 974 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en; 975 break; 976 case SCTP_V6_FLOW: 977 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en; 978 break; 979 case IPV4_FLOW: 980 case IPV6_FLOW: 981 *tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 982 break; 983 default: 984 return -EINVAL; 985 } 986 987 return 0; 988 } 989 990 static u64 hclgevf_convert_rss_tuple(u8 tuple_sets) 991 { 992 u64 tuple_data = 0; 993 994 if (tuple_sets & HCLGEVF_D_PORT_BIT) 995 tuple_data |= RXH_L4_B_2_3; 996 if (tuple_sets & HCLGEVF_S_PORT_BIT) 997 tuple_data |= RXH_L4_B_0_1; 998 if (tuple_sets & HCLGEVF_D_IP_BIT) 999 tuple_data |= RXH_IP_DST; 1000 if (tuple_sets & HCLGEVF_S_IP_BIT) 1001 tuple_data |= RXH_IP_SRC; 1002 1003 return tuple_data; 1004 } 1005 1006 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 1007 struct ethtool_rxnfc *nfc) 1008 { 1009 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1010 u8 tuple_sets; 1011 int ret; 1012 1013 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 1014 return -EOPNOTSUPP; 1015 1016 nfc->data = 0; 1017 1018 ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type, 1019 &tuple_sets); 1020 if (ret || !tuple_sets) 1021 return ret; 1022 1023 nfc->data = hclgevf_convert_rss_tuple(tuple_sets); 1024 1025 return 0; 1026 } 1027 1028 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 1029 struct hclgevf_rss_cfg *rss_cfg) 1030 { 1031 struct hclgevf_rss_input_tuple_cmd *req; 1032 struct hclgevf_desc desc; 1033 int ret; 1034 1035 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 1036 1037 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 1038 1039 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 1040 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 1041 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 1042 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 1043 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 1044 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 1045 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 1046 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 1047 1048 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1049 if (ret) 1050 dev_err(&hdev->pdev->dev, 1051 "Configure rss input fail, status = %d\n", ret); 1052 return ret; 1053 } 1054 1055 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 1056 { 1057 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1058 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1059 1060 return rss_cfg->rss_size; 1061 } 1062 1063 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 1064 int vector_id, 1065 struct hnae3_ring_chain_node *ring_chain) 1066 { 1067 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1068 struct hclge_vf_to_pf_msg send_msg; 1069 struct hnae3_ring_chain_node *node; 1070 int status; 1071 int i = 0; 1072 1073 memset(&send_msg, 0, sizeof(send_msg)); 1074 send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 1075 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 1076 send_msg.vector_id = vector_id; 1077 1078 for (node = ring_chain; node; node = node->next) { 1079 send_msg.param[i].ring_type = 1080 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 1081 1082 send_msg.param[i].tqp_index = node->tqp_index; 1083 send_msg.param[i].int_gl_index = 1084 hnae3_get_field(node->int_gl_idx, 1085 HNAE3_RING_GL_IDX_M, 1086 HNAE3_RING_GL_IDX_S); 1087 1088 i++; 1089 if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 1090 send_msg.ring_num = i; 1091 1092 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 1093 NULL, 0); 1094 if (status) { 1095 dev_err(&hdev->pdev->dev, 1096 "Map TQP fail, status is %d.\n", 1097 status); 1098 return status; 1099 } 1100 i = 0; 1101 } 1102 } 1103 1104 return 0; 1105 } 1106 1107 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1108 struct hnae3_ring_chain_node *ring_chain) 1109 { 1110 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1111 int vector_id; 1112 1113 vector_id = hclgevf_get_vector_index(hdev, vector); 1114 if (vector_id < 0) { 1115 dev_err(&handle->pdev->dev, 1116 "Get vector index fail. ret =%d\n", vector_id); 1117 return vector_id; 1118 } 1119 1120 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1121 } 1122 1123 static int hclgevf_unmap_ring_from_vector( 1124 struct hnae3_handle *handle, 1125 int vector, 1126 struct hnae3_ring_chain_node *ring_chain) 1127 { 1128 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1129 int ret, vector_id; 1130 1131 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1132 return 0; 1133 1134 vector_id = hclgevf_get_vector_index(hdev, vector); 1135 if (vector_id < 0) { 1136 dev_err(&handle->pdev->dev, 1137 "Get vector index fail. ret =%d\n", vector_id); 1138 return vector_id; 1139 } 1140 1141 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 1142 if (ret) 1143 dev_err(&handle->pdev->dev, 1144 "Unmap ring from vector fail. vector=%d, ret =%d\n", 1145 vector_id, 1146 ret); 1147 1148 return ret; 1149 } 1150 1151 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 1152 { 1153 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1154 int vector_id; 1155 1156 vector_id = hclgevf_get_vector_index(hdev, vector); 1157 if (vector_id < 0) { 1158 dev_err(&handle->pdev->dev, 1159 "hclgevf_put_vector get vector index fail. ret =%d\n", 1160 vector_id); 1161 return vector_id; 1162 } 1163 1164 hclgevf_free_vector(hdev, vector_id); 1165 1166 return 0; 1167 } 1168 1169 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1170 bool en_uc_pmc, bool en_mc_pmc, 1171 bool en_bc_pmc) 1172 { 1173 struct hnae3_handle *handle = &hdev->nic; 1174 struct hclge_vf_to_pf_msg send_msg; 1175 int ret; 1176 1177 memset(&send_msg, 0, sizeof(send_msg)); 1178 send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 1179 send_msg.en_bc = en_bc_pmc ? 1 : 0; 1180 send_msg.en_uc = en_uc_pmc ? 1 : 0; 1181 send_msg.en_mc = en_mc_pmc ? 1 : 0; 1182 send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC, 1183 &handle->priv_flags) ? 1 : 0; 1184 1185 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1186 if (ret) 1187 dev_err(&hdev->pdev->dev, 1188 "Set promisc mode fail, status is %d.\n", ret); 1189 1190 return ret; 1191 } 1192 1193 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 1194 bool en_mc_pmc) 1195 { 1196 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1197 bool en_bc_pmc; 1198 1199 en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; 1200 1201 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 1202 en_bc_pmc); 1203 } 1204 1205 static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 1206 { 1207 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1208 1209 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1210 hclgevf_task_schedule(hdev, 0); 1211 } 1212 1213 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 1214 { 1215 struct hnae3_handle *handle = &hdev->nic; 1216 bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 1217 bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 1218 int ret; 1219 1220 if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 1221 ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 1222 if (!ret) 1223 clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1224 } 1225 } 1226 1227 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, 1228 int stream_id, bool enable) 1229 { 1230 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1231 struct hclgevf_desc desc; 1232 int status; 1233 1234 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1235 1236 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1237 false); 1238 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1239 req->stream_id = cpu_to_le16(stream_id); 1240 if (enable) 1241 req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 1242 1243 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1244 if (status) 1245 dev_err(&hdev->pdev->dev, 1246 "TQP enable fail, status =%d.\n", status); 1247 1248 return status; 1249 } 1250 1251 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1252 { 1253 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1254 struct hclgevf_tqp *tqp; 1255 int i; 1256 1257 for (i = 0; i < kinfo->num_tqps; i++) { 1258 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1259 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1260 } 1261 } 1262 1263 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 1264 { 1265 struct hclge_vf_to_pf_msg send_msg; 1266 u8 host_mac[ETH_ALEN]; 1267 int status; 1268 1269 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 1270 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 1271 ETH_ALEN); 1272 if (status) { 1273 dev_err(&hdev->pdev->dev, 1274 "fail to get VF MAC from host %d", status); 1275 return status; 1276 } 1277 1278 ether_addr_copy(p, host_mac); 1279 1280 return 0; 1281 } 1282 1283 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1284 { 1285 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1286 u8 host_mac_addr[ETH_ALEN]; 1287 1288 if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 1289 return; 1290 1291 hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 1292 if (hdev->has_pf_mac) 1293 ether_addr_copy(p, host_mac_addr); 1294 else 1295 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1296 } 1297 1298 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1299 bool is_first) 1300 { 1301 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1302 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1303 struct hclge_vf_to_pf_msg send_msg; 1304 u8 *new_mac_addr = (u8 *)p; 1305 int status; 1306 1307 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 1308 send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1309 ether_addr_copy(send_msg.data, new_mac_addr); 1310 if (is_first && !hdev->has_pf_mac) 1311 eth_zero_addr(&send_msg.data[ETH_ALEN]); 1312 else 1313 ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 1314 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1315 if (!status) 1316 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1317 1318 return status; 1319 } 1320 1321 static struct hclgevf_mac_addr_node * 1322 hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 1323 { 1324 struct hclgevf_mac_addr_node *mac_node, *tmp; 1325 1326 list_for_each_entry_safe(mac_node, tmp, list, node) 1327 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 1328 return mac_node; 1329 1330 return NULL; 1331 } 1332 1333 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 1334 enum HCLGEVF_MAC_NODE_STATE state) 1335 { 1336 switch (state) { 1337 /* from set_rx_mode or tmp_add_list */ 1338 case HCLGEVF_MAC_TO_ADD: 1339 if (mac_node->state == HCLGEVF_MAC_TO_DEL) 1340 mac_node->state = HCLGEVF_MAC_ACTIVE; 1341 break; 1342 /* only from set_rx_mode */ 1343 case HCLGEVF_MAC_TO_DEL: 1344 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1345 list_del(&mac_node->node); 1346 kfree(mac_node); 1347 } else { 1348 mac_node->state = HCLGEVF_MAC_TO_DEL; 1349 } 1350 break; 1351 /* only from tmp_add_list, the mac_node->state won't be 1352 * HCLGEVF_MAC_ACTIVE 1353 */ 1354 case HCLGEVF_MAC_ACTIVE: 1355 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1356 mac_node->state = HCLGEVF_MAC_ACTIVE; 1357 break; 1358 } 1359 } 1360 1361 static int hclgevf_update_mac_list(struct hnae3_handle *handle, 1362 enum HCLGEVF_MAC_NODE_STATE state, 1363 enum HCLGEVF_MAC_ADDR_TYPE mac_type, 1364 const unsigned char *addr) 1365 { 1366 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1367 struct hclgevf_mac_addr_node *mac_node; 1368 struct list_head *list; 1369 1370 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1371 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1372 1373 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1374 1375 /* if the mac addr is already in the mac list, no need to add a new 1376 * one into it, just check the mac addr state, convert it to a new 1377 * new state, or just remove it, or do nothing. 1378 */ 1379 mac_node = hclgevf_find_mac_node(list, addr); 1380 if (mac_node) { 1381 hclgevf_update_mac_node(mac_node, state); 1382 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1383 return 0; 1384 } 1385 /* if this address is never added, unnecessary to delete */ 1386 if (state == HCLGEVF_MAC_TO_DEL) { 1387 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1388 return -ENOENT; 1389 } 1390 1391 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 1392 if (!mac_node) { 1393 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1394 return -ENOMEM; 1395 } 1396 1397 mac_node->state = state; 1398 ether_addr_copy(mac_node->mac_addr, addr); 1399 list_add_tail(&mac_node->node, list); 1400 1401 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1402 return 0; 1403 } 1404 1405 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1406 const unsigned char *addr) 1407 { 1408 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1409 HCLGEVF_MAC_ADDR_UC, addr); 1410 } 1411 1412 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1413 const unsigned char *addr) 1414 { 1415 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1416 HCLGEVF_MAC_ADDR_UC, addr); 1417 } 1418 1419 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1420 const unsigned char *addr) 1421 { 1422 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1423 HCLGEVF_MAC_ADDR_MC, addr); 1424 } 1425 1426 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1427 const unsigned char *addr) 1428 { 1429 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1430 HCLGEVF_MAC_ADDR_MC, addr); 1431 } 1432 1433 static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, 1434 struct hclgevf_mac_addr_node *mac_node, 1435 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1436 { 1437 struct hclge_vf_to_pf_msg send_msg; 1438 u8 code, subcode; 1439 1440 if (mac_type == HCLGEVF_MAC_ADDR_UC) { 1441 code = HCLGE_MBX_SET_UNICAST; 1442 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1443 subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; 1444 else 1445 subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; 1446 } else { 1447 code = HCLGE_MBX_SET_MULTICAST; 1448 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1449 subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; 1450 else 1451 subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; 1452 } 1453 1454 hclgevf_build_send_msg(&send_msg, code, subcode); 1455 ether_addr_copy(send_msg.data, mac_node->mac_addr); 1456 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1457 } 1458 1459 static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, 1460 struct list_head *list, 1461 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1462 { 1463 struct hclgevf_mac_addr_node *mac_node, *tmp; 1464 int ret; 1465 1466 list_for_each_entry_safe(mac_node, tmp, list, node) { 1467 ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); 1468 if (ret) { 1469 dev_err(&hdev->pdev->dev, 1470 "failed to configure mac %pM, state = %d, ret = %d\n", 1471 mac_node->mac_addr, mac_node->state, ret); 1472 return; 1473 } 1474 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1475 mac_node->state = HCLGEVF_MAC_ACTIVE; 1476 } else { 1477 list_del(&mac_node->node); 1478 kfree(mac_node); 1479 } 1480 } 1481 } 1482 1483 static void hclgevf_sync_from_add_list(struct list_head *add_list, 1484 struct list_head *mac_list) 1485 { 1486 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1487 1488 list_for_each_entry_safe(mac_node, tmp, add_list, node) { 1489 /* if the mac address from tmp_add_list is not in the 1490 * uc/mc_mac_list, it means have received a TO_DEL request 1491 * during the time window of sending mac config request to PF 1492 * If mac_node state is ACTIVE, then change its state to TO_DEL, 1493 * then it will be removed at next time. If is TO_ADD, it means 1494 * send TO_ADD request failed, so just remove the mac node. 1495 */ 1496 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1497 if (new_node) { 1498 hclgevf_update_mac_node(new_node, mac_node->state); 1499 list_del(&mac_node->node); 1500 kfree(mac_node); 1501 } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { 1502 mac_node->state = HCLGEVF_MAC_TO_DEL; 1503 list_del(&mac_node->node); 1504 list_add_tail(&mac_node->node, mac_list); 1505 } else { 1506 list_del(&mac_node->node); 1507 kfree(mac_node); 1508 } 1509 } 1510 } 1511 1512 static void hclgevf_sync_from_del_list(struct list_head *del_list, 1513 struct list_head *mac_list) 1514 { 1515 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1516 1517 list_for_each_entry_safe(mac_node, tmp, del_list, node) { 1518 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1519 if (new_node) { 1520 /* If the mac addr is exist in the mac list, it means 1521 * received a new request TO_ADD during the time window 1522 * of sending mac addr configurrequest to PF, so just 1523 * change the mac state to ACTIVE. 1524 */ 1525 new_node->state = HCLGEVF_MAC_ACTIVE; 1526 list_del(&mac_node->node); 1527 kfree(mac_node); 1528 } else { 1529 list_del(&mac_node->node); 1530 list_add_tail(&mac_node->node, mac_list); 1531 } 1532 } 1533 } 1534 1535 static void hclgevf_clear_list(struct list_head *list) 1536 { 1537 struct hclgevf_mac_addr_node *mac_node, *tmp; 1538 1539 list_for_each_entry_safe(mac_node, tmp, list, node) { 1540 list_del(&mac_node->node); 1541 kfree(mac_node); 1542 } 1543 } 1544 1545 static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, 1546 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1547 { 1548 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1549 struct list_head tmp_add_list, tmp_del_list; 1550 struct list_head *list; 1551 1552 INIT_LIST_HEAD(&tmp_add_list); 1553 INIT_LIST_HEAD(&tmp_del_list); 1554 1555 /* move the mac addr to the tmp_add_list and tmp_del_list, then 1556 * we can add/delete these mac addr outside the spin lock 1557 */ 1558 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1559 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1560 1561 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1562 1563 list_for_each_entry_safe(mac_node, tmp, list, node) { 1564 switch (mac_node->state) { 1565 case HCLGEVF_MAC_TO_DEL: 1566 list_del(&mac_node->node); 1567 list_add_tail(&mac_node->node, &tmp_del_list); 1568 break; 1569 case HCLGEVF_MAC_TO_ADD: 1570 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 1571 if (!new_node) 1572 goto stop_traverse; 1573 1574 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 1575 new_node->state = mac_node->state; 1576 list_add_tail(&new_node->node, &tmp_add_list); 1577 break; 1578 default: 1579 break; 1580 } 1581 } 1582 1583 stop_traverse: 1584 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1585 1586 /* delete first, in order to get max mac table space for adding */ 1587 hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); 1588 hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); 1589 1590 /* if some mac addresses were added/deleted fail, move back to the 1591 * mac_list, and retry at next time. 1592 */ 1593 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1594 1595 hclgevf_sync_from_del_list(&tmp_del_list, list); 1596 hclgevf_sync_from_add_list(&tmp_add_list, list); 1597 1598 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1599 } 1600 1601 static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) 1602 { 1603 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); 1604 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); 1605 } 1606 1607 static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) 1608 { 1609 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1610 1611 hclgevf_clear_list(&hdev->mac_table.uc_mac_list); 1612 hclgevf_clear_list(&hdev->mac_table.mc_mac_list); 1613 1614 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1615 } 1616 1617 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1618 __be16 proto, u16 vlan_id, 1619 bool is_kill) 1620 { 1621 #define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 1622 #define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 1623 #define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 1624 1625 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1626 struct hclge_vf_to_pf_msg send_msg; 1627 int ret; 1628 1629 if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1630 return -EINVAL; 1631 1632 if (proto != htons(ETH_P_8021Q)) 1633 return -EPROTONOSUPPORT; 1634 1635 /* When device is resetting or reset failed, firmware is unable to 1636 * handle mailbox. Just record the vlan id, and remove it after 1637 * reset finished. 1638 */ 1639 if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 1640 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { 1641 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1642 return -EBUSY; 1643 } 1644 1645 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1646 HCLGE_MBX_VLAN_FILTER); 1647 send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; 1648 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, 1649 sizeof(vlan_id)); 1650 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, 1651 sizeof(proto)); 1652 /* when remove hw vlan filter failed, record the vlan id, 1653 * and try to remove it from hw later, to be consistence 1654 * with stack. 1655 */ 1656 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1657 if (is_kill && ret) 1658 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1659 1660 return ret; 1661 } 1662 1663 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1664 { 1665 #define HCLGEVF_MAX_SYNC_COUNT 60 1666 struct hnae3_handle *handle = &hdev->nic; 1667 int ret, sync_cnt = 0; 1668 u16 vlan_id; 1669 1670 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1671 while (vlan_id != VLAN_N_VID) { 1672 ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1673 vlan_id, true); 1674 if (ret) 1675 return; 1676 1677 clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1678 sync_cnt++; 1679 if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1680 return; 1681 1682 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1683 } 1684 } 1685 1686 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1687 { 1688 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1689 struct hclge_vf_to_pf_msg send_msg; 1690 1691 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1692 HCLGE_MBX_VLAN_RX_OFF_CFG); 1693 send_msg.data[0] = enable ? 1 : 0; 1694 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1695 } 1696 1697 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 1698 { 1699 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1700 struct hclge_vf_to_pf_msg send_msg; 1701 int ret; 1702 1703 /* disable vf queue before send queue reset msg to PF */ 1704 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); 1705 if (ret) 1706 return ret; 1707 1708 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1709 memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 1710 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1711 } 1712 1713 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1714 { 1715 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1716 struct hclge_vf_to_pf_msg send_msg; 1717 1718 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); 1719 memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); 1720 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1721 } 1722 1723 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1724 enum hnae3_reset_notify_type type) 1725 { 1726 struct hnae3_client *client = hdev->nic_client; 1727 struct hnae3_handle *handle = &hdev->nic; 1728 int ret; 1729 1730 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 1731 !client) 1732 return 0; 1733 1734 if (!client->ops->reset_notify) 1735 return -EOPNOTSUPP; 1736 1737 ret = client->ops->reset_notify(handle, type); 1738 if (ret) 1739 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1740 type, ret); 1741 1742 return ret; 1743 } 1744 1745 static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, 1746 enum hnae3_reset_notify_type type) 1747 { 1748 struct hnae3_client *client = hdev->roce_client; 1749 struct hnae3_handle *handle = &hdev->roce; 1750 int ret; 1751 1752 if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) 1753 return 0; 1754 1755 if (!client->ops->reset_notify) 1756 return -EOPNOTSUPP; 1757 1758 ret = client->ops->reset_notify(handle, type); 1759 if (ret) 1760 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 1761 type, ret); 1762 return ret; 1763 } 1764 1765 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1766 { 1767 #define HCLGEVF_RESET_WAIT_US 20000 1768 #define HCLGEVF_RESET_WAIT_CNT 2000 1769 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1770 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1771 1772 u32 val; 1773 int ret; 1774 1775 if (hdev->reset_type == HNAE3_VF_RESET) 1776 ret = readl_poll_timeout(hdev->hw.io_base + 1777 HCLGEVF_VF_RST_ING, val, 1778 !(val & HCLGEVF_VF_RST_ING_BIT), 1779 HCLGEVF_RESET_WAIT_US, 1780 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1781 else 1782 ret = readl_poll_timeout(hdev->hw.io_base + 1783 HCLGEVF_RST_ING, val, 1784 !(val & HCLGEVF_RST_ING_BITS), 1785 HCLGEVF_RESET_WAIT_US, 1786 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1787 1788 /* hardware completion status should be available by this time */ 1789 if (ret) { 1790 dev_err(&hdev->pdev->dev, 1791 "couldn't get reset done status from h/w, timeout!\n"); 1792 return ret; 1793 } 1794 1795 /* we will wait a bit more to let reset of the stack to complete. This 1796 * might happen in case reset assertion was made by PF. Yes, this also 1797 * means we might end up waiting bit more even for VF reset. 1798 */ 1799 msleep(5000); 1800 1801 return 0; 1802 } 1803 1804 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 1805 { 1806 u32 reg_val; 1807 1808 reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); 1809 if (enable) 1810 reg_val |= HCLGEVF_NIC_SW_RST_RDY; 1811 else 1812 reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 1813 1814 hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 1815 reg_val); 1816 } 1817 1818 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1819 { 1820 int ret; 1821 1822 /* uninitialize the nic client */ 1823 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1824 if (ret) 1825 return ret; 1826 1827 /* re-initialize the hclge device */ 1828 ret = hclgevf_reset_hdev(hdev); 1829 if (ret) { 1830 dev_err(&hdev->pdev->dev, 1831 "hclge device re-init failed, VF is disabled!\n"); 1832 return ret; 1833 } 1834 1835 /* bring up the nic client again */ 1836 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1837 if (ret) 1838 return ret; 1839 1840 /* clear handshake status with IMP */ 1841 hclgevf_reset_handshake(hdev, false); 1842 1843 /* bring up the nic to enable TX/RX again */ 1844 return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1845 } 1846 1847 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1848 { 1849 #define HCLGEVF_RESET_SYNC_TIME 100 1850 1851 if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1852 struct hclge_vf_to_pf_msg send_msg; 1853 int ret; 1854 1855 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); 1856 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1857 if (ret) { 1858 dev_err(&hdev->pdev->dev, 1859 "failed to assert VF reset, ret = %d\n", ret); 1860 return ret; 1861 } 1862 hdev->rst_stats.vf_func_rst_cnt++; 1863 } 1864 1865 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1866 /* inform hardware that preparatory work is done */ 1867 msleep(HCLGEVF_RESET_SYNC_TIME); 1868 hclgevf_reset_handshake(hdev, true); 1869 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", 1870 hdev->reset_type); 1871 1872 return 0; 1873 } 1874 1875 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 1876 { 1877 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 1878 hdev->rst_stats.vf_func_rst_cnt); 1879 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1880 hdev->rst_stats.flr_rst_cnt); 1881 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 1882 hdev->rst_stats.vf_rst_cnt); 1883 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1884 hdev->rst_stats.rst_done_cnt); 1885 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1886 hdev->rst_stats.hw_rst_done_cnt); 1887 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1888 hdev->rst_stats.rst_cnt); 1889 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1890 hdev->rst_stats.rst_fail_cnt); 1891 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1892 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 1893 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 1894 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG)); 1895 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1896 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); 1897 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1898 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 1899 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1900 } 1901 1902 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1903 { 1904 /* recover handshake status with IMP when reset fail */ 1905 hclgevf_reset_handshake(hdev, true); 1906 hdev->rst_stats.rst_fail_cnt++; 1907 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1908 hdev->rst_stats.rst_fail_cnt); 1909 1910 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1911 set_bit(hdev->reset_type, &hdev->reset_pending); 1912 1913 if (hclgevf_is_reset_pending(hdev)) { 1914 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1915 hclgevf_reset_task_schedule(hdev); 1916 } else { 1917 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1918 hclgevf_dump_rst_info(hdev); 1919 } 1920 } 1921 1922 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 1923 { 1924 int ret; 1925 1926 hdev->rst_stats.rst_cnt++; 1927 1928 /* perform reset of the stack & ae device for a client */ 1929 ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 1930 if (ret) 1931 return ret; 1932 1933 rtnl_lock(); 1934 /* bring down the nic to stop any ongoing TX/RX */ 1935 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1936 rtnl_unlock(); 1937 if (ret) 1938 return ret; 1939 1940 return hclgevf_reset_prepare_wait(hdev); 1941 } 1942 1943 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 1944 { 1945 int ret; 1946 1947 hdev->rst_stats.hw_rst_done_cnt++; 1948 ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 1949 if (ret) 1950 return ret; 1951 1952 rtnl_lock(); 1953 /* now, re-initialize the nic client and ae device */ 1954 ret = hclgevf_reset_stack(hdev); 1955 rtnl_unlock(); 1956 if (ret) { 1957 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1958 return ret; 1959 } 1960 1961 ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 1962 /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1 1963 * times 1964 */ 1965 if (ret && 1966 hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) 1967 return ret; 1968 1969 ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); 1970 if (ret) 1971 return ret; 1972 1973 hdev->last_reset_time = jiffies; 1974 hdev->rst_stats.rst_done_cnt++; 1975 hdev->rst_stats.rst_fail_cnt = 0; 1976 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1977 1978 return 0; 1979 } 1980 1981 static void hclgevf_reset(struct hclgevf_dev *hdev) 1982 { 1983 if (hclgevf_reset_prepare(hdev)) 1984 goto err_reset; 1985 1986 /* check if VF could successfully fetch the hardware reset completion 1987 * status from the hardware 1988 */ 1989 if (hclgevf_reset_wait(hdev)) { 1990 /* can't do much in this situation, will disable VF */ 1991 dev_err(&hdev->pdev->dev, 1992 "failed to fetch H/W reset completion status\n"); 1993 goto err_reset; 1994 } 1995 1996 if (hclgevf_reset_rebuild(hdev)) 1997 goto err_reset; 1998 1999 return; 2000 2001 err_reset: 2002 hclgevf_reset_err_handle(hdev); 2003 } 2004 2005 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 2006 unsigned long *addr) 2007 { 2008 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2009 2010 /* return the highest priority reset level amongst all */ 2011 if (test_bit(HNAE3_VF_RESET, addr)) { 2012 rst_level = HNAE3_VF_RESET; 2013 clear_bit(HNAE3_VF_RESET, addr); 2014 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2015 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2016 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 2017 rst_level = HNAE3_VF_FULL_RESET; 2018 clear_bit(HNAE3_VF_FULL_RESET, addr); 2019 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2020 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 2021 rst_level = HNAE3_VF_PF_FUNC_RESET; 2022 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2023 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2024 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 2025 rst_level = HNAE3_VF_FUNC_RESET; 2026 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2027 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 2028 rst_level = HNAE3_FLR_RESET; 2029 clear_bit(HNAE3_FLR_RESET, addr); 2030 } 2031 2032 return rst_level; 2033 } 2034 2035 static void hclgevf_reset_event(struct pci_dev *pdev, 2036 struct hnae3_handle *handle) 2037 { 2038 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2039 struct hclgevf_dev *hdev = ae_dev->priv; 2040 2041 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 2042 2043 if (hdev->default_reset_request) 2044 hdev->reset_level = 2045 hclgevf_get_reset_level(hdev, 2046 &hdev->default_reset_request); 2047 else 2048 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2049 2050 /* reset of this VF requested */ 2051 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 2052 hclgevf_reset_task_schedule(hdev); 2053 2054 hdev->last_reset_time = jiffies; 2055 } 2056 2057 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 2058 enum hnae3_reset_type rst_type) 2059 { 2060 struct hclgevf_dev *hdev = ae_dev->priv; 2061 2062 set_bit(rst_type, &hdev->default_reset_request); 2063 } 2064 2065 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 2066 { 2067 writel(en ? 1 : 0, vector->addr); 2068 } 2069 2070 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 2071 { 2072 #define HCLGEVF_FLR_RETRY_WAIT_MS 500 2073 #define HCLGEVF_FLR_RETRY_CNT 5 2074 2075 struct hclgevf_dev *hdev = ae_dev->priv; 2076 int retry_cnt = 0; 2077 int ret; 2078 2079 retry: 2080 down(&hdev->reset_sem); 2081 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2082 hdev->reset_type = HNAE3_FLR_RESET; 2083 ret = hclgevf_reset_prepare(hdev); 2084 if (ret) { 2085 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", 2086 ret); 2087 if (hdev->reset_pending || 2088 retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) { 2089 dev_err(&hdev->pdev->dev, 2090 "reset_pending:0x%lx, retry_cnt:%d\n", 2091 hdev->reset_pending, retry_cnt); 2092 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2093 up(&hdev->reset_sem); 2094 msleep(HCLGEVF_FLR_RETRY_WAIT_MS); 2095 goto retry; 2096 } 2097 } 2098 2099 /* disable misc vector before FLR done */ 2100 hclgevf_enable_vector(&hdev->misc_vector, false); 2101 hdev->rst_stats.flr_rst_cnt++; 2102 } 2103 2104 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 2105 { 2106 struct hclgevf_dev *hdev = ae_dev->priv; 2107 int ret; 2108 2109 hclgevf_enable_vector(&hdev->misc_vector, true); 2110 2111 ret = hclgevf_reset_rebuild(hdev); 2112 if (ret) 2113 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 2114 ret); 2115 2116 hdev->reset_type = HNAE3_NONE_RESET; 2117 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2118 up(&hdev->reset_sem); 2119 } 2120 2121 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 2122 { 2123 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2124 2125 return hdev->fw_version; 2126 } 2127 2128 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 2129 { 2130 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 2131 2132 vector->vector_irq = pci_irq_vector(hdev->pdev, 2133 HCLGEVF_MISC_VECTOR_NUM); 2134 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 2135 /* vector status always valid for Vector 0 */ 2136 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 2137 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 2138 2139 hdev->num_msi_left -= 1; 2140 hdev->num_msi_used += 1; 2141 } 2142 2143 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 2144 { 2145 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2146 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 2147 &hdev->state)) 2148 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2149 } 2150 2151 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 2152 { 2153 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2154 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 2155 &hdev->state)) 2156 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2157 } 2158 2159 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 2160 unsigned long delay) 2161 { 2162 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2163 !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2164 mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); 2165 } 2166 2167 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) 2168 { 2169 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 2170 2171 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) 2172 return; 2173 2174 down(&hdev->reset_sem); 2175 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2176 2177 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 2178 &hdev->reset_state)) { 2179 /* PF has initmated that it is about to reset the hardware. 2180 * We now have to poll & check if hardware has actually 2181 * completed the reset sequence. On hardware reset completion, 2182 * VF needs to reset the client and ae device. 2183 */ 2184 hdev->reset_attempts = 0; 2185 2186 hdev->last_reset_time = jiffies; 2187 while ((hdev->reset_type = 2188 hclgevf_get_reset_level(hdev, &hdev->reset_pending)) 2189 != HNAE3_NONE_RESET) 2190 hclgevf_reset(hdev); 2191 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 2192 &hdev->reset_state)) { 2193 /* we could be here when either of below happens: 2194 * 1. reset was initiated due to watchdog timeout caused by 2195 * a. IMP was earlier reset and our TX got choked down and 2196 * which resulted in watchdog reacting and inducing VF 2197 * reset. This also means our cmdq would be unreliable. 2198 * b. problem in TX due to other lower layer(example link 2199 * layer not functioning properly etc.) 2200 * 2. VF reset might have been initiated due to some config 2201 * change. 2202 * 2203 * NOTE: Theres no clear way to detect above cases than to react 2204 * to the response of PF for this reset request. PF will ack the 2205 * 1b and 2. cases but we will not get any intimation about 1a 2206 * from PF as cmdq would be in unreliable state i.e. mailbox 2207 * communication between PF and VF would be broken. 2208 * 2209 * if we are never geting into pending state it means either: 2210 * 1. PF is not receiving our request which could be due to IMP 2211 * reset 2212 * 2. PF is screwed 2213 * We cannot do much for 2. but to check first we can try reset 2214 * our PCIe + stack and see if it alleviates the problem. 2215 */ 2216 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 2217 /* prepare for full reset of stack + pcie interface */ 2218 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 2219 2220 /* "defer" schedule the reset task again */ 2221 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2222 } else { 2223 hdev->reset_attempts++; 2224 2225 set_bit(hdev->reset_level, &hdev->reset_pending); 2226 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2227 } 2228 hclgevf_reset_task_schedule(hdev); 2229 } 2230 2231 hdev->reset_type = HNAE3_NONE_RESET; 2232 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2233 up(&hdev->reset_sem); 2234 } 2235 2236 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 2237 { 2238 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2239 return; 2240 2241 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 2242 return; 2243 2244 hclgevf_mbx_async_handler(hdev); 2245 2246 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2247 } 2248 2249 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 2250 { 2251 struct hclge_vf_to_pf_msg send_msg; 2252 int ret; 2253 2254 if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) 2255 return; 2256 2257 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 2258 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2259 if (ret) 2260 dev_err(&hdev->pdev->dev, 2261 "VF sends keep alive cmd failed(=%d)\n", ret); 2262 } 2263 2264 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 2265 { 2266 unsigned long delta = round_jiffies_relative(HZ); 2267 struct hnae3_handle *handle = &hdev->nic; 2268 2269 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2270 return; 2271 2272 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 2273 delta = jiffies - hdev->last_serv_processed; 2274 2275 if (delta < round_jiffies_relative(HZ)) { 2276 delta = round_jiffies_relative(HZ) - delta; 2277 goto out; 2278 } 2279 } 2280 2281 hdev->serv_processed_cnt++; 2282 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 2283 hclgevf_keep_alive(hdev); 2284 2285 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 2286 hdev->last_serv_processed = jiffies; 2287 goto out; 2288 } 2289 2290 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 2291 hclgevf_tqps_update_stats(handle); 2292 2293 /* request the link status from the PF. PF would be able to tell VF 2294 * about such updates in future so we might remove this later 2295 */ 2296 hclgevf_request_link_info(hdev); 2297 2298 hclgevf_update_link_mode(hdev); 2299 2300 hclgevf_sync_vlan_filter(hdev); 2301 2302 hclgevf_sync_mac_table(hdev); 2303 2304 hclgevf_sync_promisc_mode(hdev); 2305 2306 hdev->last_serv_processed = jiffies; 2307 2308 out: 2309 hclgevf_task_schedule(hdev, delta); 2310 } 2311 2312 static void hclgevf_service_task(struct work_struct *work) 2313 { 2314 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 2315 service_task.work); 2316 2317 hclgevf_reset_service_task(hdev); 2318 hclgevf_mailbox_service_task(hdev); 2319 hclgevf_periodic_service_task(hdev); 2320 2321 /* Handle reset and mbx again in case periodical task delays the 2322 * handling by calling hclgevf_task_schedule() in 2323 * hclgevf_periodic_service_task() 2324 */ 2325 hclgevf_reset_service_task(hdev); 2326 hclgevf_mailbox_service_task(hdev); 2327 } 2328 2329 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 2330 { 2331 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 2332 } 2333 2334 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 2335 u32 *clearval) 2336 { 2337 u32 val, cmdq_stat_reg, rst_ing_reg; 2338 2339 /* fetch the events from their corresponding regs */ 2340 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 2341 HCLGEVF_VECTOR0_CMDQ_STATE_REG); 2342 2343 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 2344 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2345 dev_info(&hdev->pdev->dev, 2346 "receive reset interrupt 0x%x!\n", rst_ing_reg); 2347 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 2348 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2349 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 2350 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 2351 hdev->rst_stats.vf_rst_cnt++; 2352 /* set up VF hardware reset status, its PF will clear 2353 * this status when PF has initialized done. 2354 */ 2355 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 2356 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 2357 val | HCLGEVF_VF_RST_ING_BIT); 2358 return HCLGEVF_VECTOR0_EVENT_RST; 2359 } 2360 2361 /* check for vector0 mailbox(=CMDQ RX) event source */ 2362 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 2363 /* for revision 0x21, clearing interrupt is writing bit 0 2364 * to the clear register, writing bit 1 means to keep the 2365 * old value. 2366 * for revision 0x20, the clear register is a read & write 2367 * register, so we should just write 0 to the bit we are 2368 * handling, and keep other bits as cmdq_stat_reg. 2369 */ 2370 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2371 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2372 else 2373 *clearval = cmdq_stat_reg & 2374 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2375 2376 return HCLGEVF_VECTOR0_EVENT_MBX; 2377 } 2378 2379 /* print other vector0 event source */ 2380 dev_info(&hdev->pdev->dev, 2381 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2382 cmdq_stat_reg); 2383 2384 return HCLGEVF_VECTOR0_EVENT_OTHER; 2385 } 2386 2387 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2388 { 2389 enum hclgevf_evt_cause event_cause; 2390 struct hclgevf_dev *hdev = data; 2391 u32 clearval; 2392 2393 hclgevf_enable_vector(&hdev->misc_vector, false); 2394 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2395 2396 switch (event_cause) { 2397 case HCLGEVF_VECTOR0_EVENT_RST: 2398 hclgevf_reset_task_schedule(hdev); 2399 break; 2400 case HCLGEVF_VECTOR0_EVENT_MBX: 2401 hclgevf_mbx_handler(hdev); 2402 break; 2403 default: 2404 break; 2405 } 2406 2407 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 2408 hclgevf_clear_event_cause(hdev, clearval); 2409 hclgevf_enable_vector(&hdev->misc_vector, true); 2410 } 2411 2412 return IRQ_HANDLED; 2413 } 2414 2415 static int hclgevf_configure(struct hclgevf_dev *hdev) 2416 { 2417 int ret; 2418 2419 /* get current port based vlan state from PF */ 2420 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2421 if (ret) 2422 return ret; 2423 2424 /* get queue configuration from PF */ 2425 ret = hclgevf_get_queue_info(hdev); 2426 if (ret) 2427 return ret; 2428 2429 /* get queue depth info from PF */ 2430 ret = hclgevf_get_queue_depth(hdev); 2431 if (ret) 2432 return ret; 2433 2434 ret = hclgevf_get_pf_media_type(hdev); 2435 if (ret) 2436 return ret; 2437 2438 /* get tc configuration from PF */ 2439 return hclgevf_get_tc_info(hdev); 2440 } 2441 2442 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2443 { 2444 struct pci_dev *pdev = ae_dev->pdev; 2445 struct hclgevf_dev *hdev; 2446 2447 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2448 if (!hdev) 2449 return -ENOMEM; 2450 2451 hdev->pdev = pdev; 2452 hdev->ae_dev = ae_dev; 2453 ae_dev->priv = hdev; 2454 2455 return 0; 2456 } 2457 2458 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2459 { 2460 struct hnae3_handle *roce = &hdev->roce; 2461 struct hnae3_handle *nic = &hdev->nic; 2462 2463 roce->rinfo.num_vectors = hdev->num_roce_msix; 2464 2465 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2466 hdev->num_msi_left == 0) 2467 return -EINVAL; 2468 2469 roce->rinfo.base_vector = hdev->roce_base_vector; 2470 2471 roce->rinfo.netdev = nic->kinfo.netdev; 2472 roce->rinfo.roce_io_base = hdev->hw.io_base; 2473 roce->rinfo.roce_mem_base = hdev->hw.mem_base; 2474 2475 roce->pdev = nic->pdev; 2476 roce->ae_algo = nic->ae_algo; 2477 roce->numa_node_mask = nic->numa_node_mask; 2478 2479 return 0; 2480 } 2481 2482 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 2483 { 2484 struct hclgevf_cfg_gro_status_cmd *req; 2485 struct hclgevf_desc desc; 2486 int ret; 2487 2488 if (!hnae3_dev_gro_supported(hdev)) 2489 return 0; 2490 2491 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2492 false); 2493 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2494 2495 req->gro_en = en ? 1 : 0; 2496 2497 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2498 if (ret) 2499 dev_err(&hdev->pdev->dev, 2500 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2501 2502 return ret; 2503 } 2504 2505 static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) 2506 { 2507 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size; 2508 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2509 struct hclgevf_rss_tuple_cfg *tuple_sets; 2510 u32 i; 2511 2512 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 2513 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2514 tuple_sets = &rss_cfg->rss_tuple_sets; 2515 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2516 u8 *rss_ind_tbl; 2517 2518 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2519 2520 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size, 2521 sizeof(*rss_ind_tbl), GFP_KERNEL); 2522 if (!rss_ind_tbl) 2523 return -ENOMEM; 2524 2525 rss_cfg->rss_indirection_tbl = rss_ind_tbl; 2526 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2527 HCLGEVF_RSS_KEY_SIZE); 2528 2529 tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2530 tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2531 tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2532 tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2533 tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2534 tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2535 tuple_sets->ipv6_sctp_en = 2536 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 2537 HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT : 2538 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2539 tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2540 } 2541 2542 /* Initialize RSS indirect table */ 2543 for (i = 0; i < rss_ind_tbl_size; i++) 2544 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2545 2546 return 0; 2547 } 2548 2549 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2550 { 2551 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2552 int ret; 2553 2554 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2555 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 2556 rss_cfg->rss_hash_key); 2557 if (ret) 2558 return ret; 2559 2560 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2561 if (ret) 2562 return ret; 2563 } 2564 2565 ret = hclgevf_set_rss_indir_table(hdev); 2566 if (ret) 2567 return ret; 2568 2569 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2570 } 2571 2572 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2573 { 2574 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2575 false); 2576 } 2577 2578 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2579 { 2580 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2581 2582 unsigned long last = hdev->serv_processed_cnt; 2583 int i = 0; 2584 2585 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2586 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2587 last == hdev->serv_processed_cnt) 2588 usleep_range(1, 1); 2589 } 2590 2591 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2592 { 2593 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2594 2595 if (enable) { 2596 hclgevf_task_schedule(hdev, 0); 2597 } else { 2598 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2599 2600 /* flush memory to make sure DOWN is seen by service task */ 2601 smp_mb__before_atomic(); 2602 hclgevf_flush_link_update(hdev); 2603 } 2604 } 2605 2606 static int hclgevf_ae_start(struct hnae3_handle *handle) 2607 { 2608 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2609 2610 hclgevf_reset_tqp_stats(handle); 2611 2612 hclgevf_request_link_info(hdev); 2613 2614 hclgevf_update_link_mode(hdev); 2615 2616 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2617 2618 return 0; 2619 } 2620 2621 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2622 { 2623 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2624 int i; 2625 2626 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2627 2628 if (hdev->reset_type != HNAE3_VF_RESET) 2629 for (i = 0; i < handle->kinfo.num_tqps; i++) 2630 if (hclgevf_reset_tqp(handle, i)) 2631 break; 2632 2633 hclgevf_reset_tqp_stats(handle); 2634 hclgevf_update_link_status(hdev, 0); 2635 } 2636 2637 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2638 { 2639 #define HCLGEVF_STATE_ALIVE 1 2640 #define HCLGEVF_STATE_NOT_ALIVE 0 2641 2642 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2643 struct hclge_vf_to_pf_msg send_msg; 2644 2645 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2646 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2647 HCLGEVF_STATE_NOT_ALIVE; 2648 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2649 } 2650 2651 static int hclgevf_client_start(struct hnae3_handle *handle) 2652 { 2653 return hclgevf_set_alive(handle, true); 2654 } 2655 2656 static void hclgevf_client_stop(struct hnae3_handle *handle) 2657 { 2658 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2659 int ret; 2660 2661 ret = hclgevf_set_alive(handle, false); 2662 if (ret) 2663 dev_warn(&hdev->pdev->dev, 2664 "%s failed %d\n", __func__, ret); 2665 } 2666 2667 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2668 { 2669 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2670 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2671 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2672 2673 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2674 2675 mutex_init(&hdev->mbx_resp.mbx_mutex); 2676 sema_init(&hdev->reset_sem, 1); 2677 2678 spin_lock_init(&hdev->mac_table.mac_list_lock); 2679 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2680 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2681 2682 /* bring the device down */ 2683 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2684 } 2685 2686 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2687 { 2688 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2689 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2690 2691 if (hdev->service_task.work.func) 2692 cancel_delayed_work_sync(&hdev->service_task); 2693 2694 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2695 } 2696 2697 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2698 { 2699 struct pci_dev *pdev = hdev->pdev; 2700 int vectors; 2701 int i; 2702 2703 if (hnae3_dev_roce_supported(hdev)) 2704 vectors = pci_alloc_irq_vectors(pdev, 2705 hdev->roce_base_msix_offset + 1, 2706 hdev->num_msi, 2707 PCI_IRQ_MSIX); 2708 else 2709 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2710 hdev->num_msi, 2711 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2712 2713 if (vectors < 0) { 2714 dev_err(&pdev->dev, 2715 "failed(%d) to allocate MSI/MSI-X vectors\n", 2716 vectors); 2717 return vectors; 2718 } 2719 if (vectors < hdev->num_msi) 2720 dev_warn(&hdev->pdev->dev, 2721 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2722 hdev->num_msi, vectors); 2723 2724 hdev->num_msi = vectors; 2725 hdev->num_msi_left = vectors; 2726 2727 hdev->base_msi_vector = pdev->irq; 2728 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2729 2730 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2731 sizeof(u16), GFP_KERNEL); 2732 if (!hdev->vector_status) { 2733 pci_free_irq_vectors(pdev); 2734 return -ENOMEM; 2735 } 2736 2737 for (i = 0; i < hdev->num_msi; i++) 2738 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2739 2740 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2741 sizeof(int), GFP_KERNEL); 2742 if (!hdev->vector_irq) { 2743 devm_kfree(&pdev->dev, hdev->vector_status); 2744 pci_free_irq_vectors(pdev); 2745 return -ENOMEM; 2746 } 2747 2748 return 0; 2749 } 2750 2751 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2752 { 2753 struct pci_dev *pdev = hdev->pdev; 2754 2755 devm_kfree(&pdev->dev, hdev->vector_status); 2756 devm_kfree(&pdev->dev, hdev->vector_irq); 2757 pci_free_irq_vectors(pdev); 2758 } 2759 2760 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2761 { 2762 int ret; 2763 2764 hclgevf_get_misc_vector(hdev); 2765 2766 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2767 HCLGEVF_NAME, pci_name(hdev->pdev)); 2768 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2769 0, hdev->misc_vector.name, hdev); 2770 if (ret) { 2771 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2772 hdev->misc_vector.vector_irq); 2773 return ret; 2774 } 2775 2776 hclgevf_clear_event_cause(hdev, 0); 2777 2778 /* enable misc. vector(vector 0) */ 2779 hclgevf_enable_vector(&hdev->misc_vector, true); 2780 2781 return ret; 2782 } 2783 2784 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2785 { 2786 /* disable misc vector(vector 0) */ 2787 hclgevf_enable_vector(&hdev->misc_vector, false); 2788 synchronize_irq(hdev->misc_vector.vector_irq); 2789 free_irq(hdev->misc_vector.vector_irq, hdev); 2790 hclgevf_free_vector(hdev, 0); 2791 } 2792 2793 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2794 { 2795 struct device *dev = &hdev->pdev->dev; 2796 2797 dev_info(dev, "VF info begin:\n"); 2798 2799 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2800 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2801 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2802 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2803 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2804 dev_info(dev, "PF media type of this VF: %u\n", 2805 hdev->hw.mac.media_type); 2806 2807 dev_info(dev, "VF info end.\n"); 2808 } 2809 2810 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2811 struct hnae3_client *client) 2812 { 2813 struct hclgevf_dev *hdev = ae_dev->priv; 2814 int rst_cnt = hdev->rst_stats.rst_cnt; 2815 int ret; 2816 2817 ret = client->ops->init_instance(&hdev->nic); 2818 if (ret) 2819 return ret; 2820 2821 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2822 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2823 rst_cnt != hdev->rst_stats.rst_cnt) { 2824 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2825 2826 client->ops->uninit_instance(&hdev->nic, 0); 2827 return -EBUSY; 2828 } 2829 2830 hnae3_set_client_init_flag(client, ae_dev, 1); 2831 2832 if (netif_msg_drv(&hdev->nic)) 2833 hclgevf_info_show(hdev); 2834 2835 return 0; 2836 } 2837 2838 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2839 struct hnae3_client *client) 2840 { 2841 struct hclgevf_dev *hdev = ae_dev->priv; 2842 int ret; 2843 2844 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2845 !hdev->nic_client) 2846 return 0; 2847 2848 ret = hclgevf_init_roce_base_info(hdev); 2849 if (ret) 2850 return ret; 2851 2852 ret = client->ops->init_instance(&hdev->roce); 2853 if (ret) 2854 return ret; 2855 2856 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2857 hnae3_set_client_init_flag(client, ae_dev, 1); 2858 2859 return 0; 2860 } 2861 2862 static int hclgevf_init_client_instance(struct hnae3_client *client, 2863 struct hnae3_ae_dev *ae_dev) 2864 { 2865 struct hclgevf_dev *hdev = ae_dev->priv; 2866 int ret; 2867 2868 switch (client->type) { 2869 case HNAE3_CLIENT_KNIC: 2870 hdev->nic_client = client; 2871 hdev->nic.client = client; 2872 2873 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2874 if (ret) 2875 goto clear_nic; 2876 2877 ret = hclgevf_init_roce_client_instance(ae_dev, 2878 hdev->roce_client); 2879 if (ret) 2880 goto clear_roce; 2881 2882 break; 2883 case HNAE3_CLIENT_ROCE: 2884 if (hnae3_dev_roce_supported(hdev)) { 2885 hdev->roce_client = client; 2886 hdev->roce.client = client; 2887 } 2888 2889 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2890 if (ret) 2891 goto clear_roce; 2892 2893 break; 2894 default: 2895 return -EINVAL; 2896 } 2897 2898 return 0; 2899 2900 clear_nic: 2901 hdev->nic_client = NULL; 2902 hdev->nic.client = NULL; 2903 return ret; 2904 clear_roce: 2905 hdev->roce_client = NULL; 2906 hdev->roce.client = NULL; 2907 return ret; 2908 } 2909 2910 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2911 struct hnae3_ae_dev *ae_dev) 2912 { 2913 struct hclgevf_dev *hdev = ae_dev->priv; 2914 2915 /* un-init roce, if it exists */ 2916 if (hdev->roce_client) { 2917 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2918 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2919 hdev->roce_client = NULL; 2920 hdev->roce.client = NULL; 2921 } 2922 2923 /* un-init nic/unic, if this was not called by roce client */ 2924 if (client->ops->uninit_instance && hdev->nic_client && 2925 client->type != HNAE3_CLIENT_ROCE) { 2926 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2927 2928 client->ops->uninit_instance(&hdev->nic, 0); 2929 hdev->nic_client = NULL; 2930 hdev->nic.client = NULL; 2931 } 2932 } 2933 2934 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) 2935 { 2936 #define HCLGEVF_MEM_BAR 4 2937 2938 struct pci_dev *pdev = hdev->pdev; 2939 struct hclgevf_hw *hw = &hdev->hw; 2940 2941 /* for device does not have device memory, return directly */ 2942 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) 2943 return 0; 2944 2945 hw->mem_base = devm_ioremap_wc(&pdev->dev, 2946 pci_resource_start(pdev, 2947 HCLGEVF_MEM_BAR), 2948 pci_resource_len(pdev, HCLGEVF_MEM_BAR)); 2949 if (!hw->mem_base) { 2950 dev_err(&pdev->dev, "failed to map device memory\n"); 2951 return -EFAULT; 2952 } 2953 2954 return 0; 2955 } 2956 2957 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2958 { 2959 struct pci_dev *pdev = hdev->pdev; 2960 struct hclgevf_hw *hw; 2961 int ret; 2962 2963 ret = pci_enable_device(pdev); 2964 if (ret) { 2965 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2966 return ret; 2967 } 2968 2969 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2970 if (ret) { 2971 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2972 goto err_disable_device; 2973 } 2974 2975 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2976 if (ret) { 2977 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2978 goto err_disable_device; 2979 } 2980 2981 pci_set_master(pdev); 2982 hw = &hdev->hw; 2983 hw->hdev = hdev; 2984 hw->io_base = pci_iomap(pdev, 2, 0); 2985 if (!hw->io_base) { 2986 dev_err(&pdev->dev, "can't map configuration register space\n"); 2987 ret = -ENOMEM; 2988 goto err_clr_master; 2989 } 2990 2991 ret = hclgevf_dev_mem_map(hdev); 2992 if (ret) 2993 goto err_unmap_io_base; 2994 2995 return 0; 2996 2997 err_unmap_io_base: 2998 pci_iounmap(pdev, hdev->hw.io_base); 2999 err_clr_master: 3000 pci_clear_master(pdev); 3001 pci_release_regions(pdev); 3002 err_disable_device: 3003 pci_disable_device(pdev); 3004 3005 return ret; 3006 } 3007 3008 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 3009 { 3010 struct pci_dev *pdev = hdev->pdev; 3011 3012 if (hdev->hw.mem_base) 3013 devm_iounmap(&pdev->dev, hdev->hw.mem_base); 3014 3015 pci_iounmap(pdev, hdev->hw.io_base); 3016 pci_clear_master(pdev); 3017 pci_release_regions(pdev); 3018 pci_disable_device(pdev); 3019 } 3020 3021 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 3022 { 3023 struct hclgevf_query_res_cmd *req; 3024 struct hclgevf_desc desc; 3025 int ret; 3026 3027 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 3028 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 3029 if (ret) { 3030 dev_err(&hdev->pdev->dev, 3031 "query vf resource failed, ret = %d.\n", ret); 3032 return ret; 3033 } 3034 3035 req = (struct hclgevf_query_res_cmd *)desc.data; 3036 3037 if (hnae3_dev_roce_supported(hdev)) { 3038 hdev->roce_base_msix_offset = 3039 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 3040 HCLGEVF_MSIX_OFT_ROCEE_M, 3041 HCLGEVF_MSIX_OFT_ROCEE_S); 3042 hdev->num_roce_msix = 3043 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3044 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3045 3046 /* nic's msix numbers is always equals to the roce's. */ 3047 hdev->num_nic_msix = hdev->num_roce_msix; 3048 3049 /* VF should have NIC vectors and Roce vectors, NIC vectors 3050 * are queued before Roce vectors. The offset is fixed to 64. 3051 */ 3052 hdev->num_msi = hdev->num_roce_msix + 3053 hdev->roce_base_msix_offset; 3054 } else { 3055 hdev->num_msi = 3056 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3057 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3058 3059 hdev->num_nic_msix = hdev->num_msi; 3060 } 3061 3062 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 3063 dev_err(&hdev->pdev->dev, 3064 "Just %u msi resources, not enough for vf(min:2).\n", 3065 hdev->num_nic_msix); 3066 return -EINVAL; 3067 } 3068 3069 return 0; 3070 } 3071 3072 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 3073 { 3074 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 3075 3076 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3077 3078 ae_dev->dev_specs.max_non_tso_bd_num = 3079 HCLGEVF_MAX_NON_TSO_BD_NUM; 3080 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3081 ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3082 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3083 ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3084 } 3085 3086 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 3087 struct hclgevf_desc *desc) 3088 { 3089 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3090 struct hclgevf_dev_specs_0_cmd *req0; 3091 struct hclgevf_dev_specs_1_cmd *req1; 3092 3093 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 3094 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 3095 3096 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 3097 ae_dev->dev_specs.rss_ind_tbl_size = 3098 le16_to_cpu(req0->rss_ind_tbl_size); 3099 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 3100 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 3101 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 3102 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 3103 } 3104 3105 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 3106 { 3107 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 3108 3109 if (!dev_specs->max_non_tso_bd_num) 3110 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 3111 if (!dev_specs->rss_ind_tbl_size) 3112 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3113 if (!dev_specs->rss_key_size) 3114 dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3115 if (!dev_specs->max_int_gl) 3116 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3117 if (!dev_specs->max_frm_size) 3118 dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3119 } 3120 3121 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 3122 { 3123 struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 3124 int ret; 3125 int i; 3126 3127 /* set default specifications as devices lower than version V3 do not 3128 * support querying specifications from firmware. 3129 */ 3130 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 3131 hclgevf_set_default_dev_specs(hdev); 3132 return 0; 3133 } 3134 3135 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3136 hclgevf_cmd_setup_basic_desc(&desc[i], 3137 HCLGEVF_OPC_QUERY_DEV_SPECS, true); 3138 desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT); 3139 } 3140 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, 3141 true); 3142 3143 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 3144 if (ret) 3145 return ret; 3146 3147 hclgevf_parse_dev_specs(hdev, desc); 3148 hclgevf_check_dev_specs(hdev); 3149 3150 return 0; 3151 } 3152 3153 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 3154 { 3155 struct pci_dev *pdev = hdev->pdev; 3156 int ret = 0; 3157 3158 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 3159 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3160 hclgevf_misc_irq_uninit(hdev); 3161 hclgevf_uninit_msi(hdev); 3162 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3163 } 3164 3165 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3166 pci_set_master(pdev); 3167 ret = hclgevf_init_msi(hdev); 3168 if (ret) { 3169 dev_err(&pdev->dev, 3170 "failed(%d) to init MSI/MSI-X\n", ret); 3171 return ret; 3172 } 3173 3174 ret = hclgevf_misc_irq_init(hdev); 3175 if (ret) { 3176 hclgevf_uninit_msi(hdev); 3177 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 3178 ret); 3179 return ret; 3180 } 3181 3182 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3183 } 3184 3185 return ret; 3186 } 3187 3188 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 3189 { 3190 struct hclge_vf_to_pf_msg send_msg; 3191 3192 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 3193 HCLGE_MBX_VPORT_LIST_CLEAR); 3194 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3195 } 3196 3197 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 3198 { 3199 struct pci_dev *pdev = hdev->pdev; 3200 int ret; 3201 3202 ret = hclgevf_pci_reset(hdev); 3203 if (ret) { 3204 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 3205 return ret; 3206 } 3207 3208 ret = hclgevf_cmd_init(hdev); 3209 if (ret) { 3210 dev_err(&pdev->dev, "cmd failed %d\n", ret); 3211 return ret; 3212 } 3213 3214 ret = hclgevf_rss_init_hw(hdev); 3215 if (ret) { 3216 dev_err(&hdev->pdev->dev, 3217 "failed(%d) to initialize RSS\n", ret); 3218 return ret; 3219 } 3220 3221 ret = hclgevf_config_gro(hdev, true); 3222 if (ret) 3223 return ret; 3224 3225 ret = hclgevf_init_vlan_config(hdev); 3226 if (ret) { 3227 dev_err(&hdev->pdev->dev, 3228 "failed(%d) to initialize VLAN config\n", ret); 3229 return ret; 3230 } 3231 3232 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3233 3234 dev_info(&hdev->pdev->dev, "Reset done\n"); 3235 3236 return 0; 3237 } 3238 3239 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 3240 { 3241 struct pci_dev *pdev = hdev->pdev; 3242 int ret; 3243 3244 ret = hclgevf_pci_init(hdev); 3245 if (ret) 3246 return ret; 3247 3248 ret = hclgevf_cmd_queue_init(hdev); 3249 if (ret) 3250 goto err_cmd_queue_init; 3251 3252 ret = hclgevf_cmd_init(hdev); 3253 if (ret) 3254 goto err_cmd_init; 3255 3256 /* Get vf resource */ 3257 ret = hclgevf_query_vf_resource(hdev); 3258 if (ret) 3259 goto err_cmd_init; 3260 3261 ret = hclgevf_query_dev_specs(hdev); 3262 if (ret) { 3263 dev_err(&pdev->dev, 3264 "failed to query dev specifications, ret = %d\n", ret); 3265 goto err_cmd_init; 3266 } 3267 3268 ret = hclgevf_init_msi(hdev); 3269 if (ret) { 3270 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 3271 goto err_cmd_init; 3272 } 3273 3274 hclgevf_state_init(hdev); 3275 hdev->reset_level = HNAE3_VF_FUNC_RESET; 3276 hdev->reset_type = HNAE3_NONE_RESET; 3277 3278 ret = hclgevf_misc_irq_init(hdev); 3279 if (ret) 3280 goto err_misc_irq_init; 3281 3282 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3283 3284 ret = hclgevf_configure(hdev); 3285 if (ret) { 3286 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 3287 goto err_config; 3288 } 3289 3290 ret = hclgevf_alloc_tqps(hdev); 3291 if (ret) { 3292 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 3293 goto err_config; 3294 } 3295 3296 ret = hclgevf_set_handle_info(hdev); 3297 if (ret) 3298 goto err_config; 3299 3300 ret = hclgevf_config_gro(hdev, true); 3301 if (ret) 3302 goto err_config; 3303 3304 /* Initialize RSS for this VF */ 3305 ret = hclgevf_rss_init_cfg(hdev); 3306 if (ret) { 3307 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 3308 goto err_config; 3309 } 3310 3311 ret = hclgevf_rss_init_hw(hdev); 3312 if (ret) { 3313 dev_err(&hdev->pdev->dev, 3314 "failed(%d) to initialize RSS\n", ret); 3315 goto err_config; 3316 } 3317 3318 /* ensure vf tbl list as empty before init*/ 3319 ret = hclgevf_clear_vport_list(hdev); 3320 if (ret) { 3321 dev_err(&pdev->dev, 3322 "failed to clear tbl list configuration, ret = %d.\n", 3323 ret); 3324 goto err_config; 3325 } 3326 3327 ret = hclgevf_init_vlan_config(hdev); 3328 if (ret) { 3329 dev_err(&hdev->pdev->dev, 3330 "failed(%d) to initialize VLAN config\n", ret); 3331 goto err_config; 3332 } 3333 3334 hdev->last_reset_time = jiffies; 3335 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 3336 HCLGEVF_DRIVER_NAME); 3337 3338 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 3339 3340 return 0; 3341 3342 err_config: 3343 hclgevf_misc_irq_uninit(hdev); 3344 err_misc_irq_init: 3345 hclgevf_state_uninit(hdev); 3346 hclgevf_uninit_msi(hdev); 3347 err_cmd_init: 3348 hclgevf_cmd_uninit(hdev); 3349 err_cmd_queue_init: 3350 hclgevf_pci_uninit(hdev); 3351 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3352 return ret; 3353 } 3354 3355 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 3356 { 3357 struct hclge_vf_to_pf_msg send_msg; 3358 3359 hclgevf_state_uninit(hdev); 3360 3361 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3362 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3363 3364 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3365 hclgevf_misc_irq_uninit(hdev); 3366 hclgevf_uninit_msi(hdev); 3367 } 3368 3369 hclgevf_cmd_uninit(hdev); 3370 hclgevf_pci_uninit(hdev); 3371 hclgevf_uninit_mac_list(hdev); 3372 } 3373 3374 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 3375 { 3376 struct pci_dev *pdev = ae_dev->pdev; 3377 int ret; 3378 3379 ret = hclgevf_alloc_hdev(ae_dev); 3380 if (ret) { 3381 dev_err(&pdev->dev, "hclge device allocation failed\n"); 3382 return ret; 3383 } 3384 3385 ret = hclgevf_init_hdev(ae_dev->priv); 3386 if (ret) { 3387 dev_err(&pdev->dev, "hclge device initialization failed\n"); 3388 return ret; 3389 } 3390 3391 return 0; 3392 } 3393 3394 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3395 { 3396 struct hclgevf_dev *hdev = ae_dev->priv; 3397 3398 hclgevf_uninit_hdev(hdev); 3399 ae_dev->priv = NULL; 3400 } 3401 3402 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3403 { 3404 struct hnae3_handle *nic = &hdev->nic; 3405 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3406 3407 return min_t(u32, hdev->rss_size_max, 3408 hdev->num_tqps / kinfo->tc_info.num_tc); 3409 } 3410 3411 /** 3412 * hclgevf_get_channels - Get the current channels enabled and max supported. 3413 * @handle: hardware information for network interface 3414 * @ch: ethtool channels structure 3415 * 3416 * We don't support separate tx and rx queues as channels. The other count 3417 * represents how many queues are being used for control. max_combined counts 3418 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3419 * q_vectors since we support a lot more queue pairs than q_vectors. 3420 **/ 3421 static void hclgevf_get_channels(struct hnae3_handle *handle, 3422 struct ethtool_channels *ch) 3423 { 3424 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3425 3426 ch->max_combined = hclgevf_get_max_channels(hdev); 3427 ch->other_count = 0; 3428 ch->max_other = 0; 3429 ch->combined_count = handle->kinfo.rss_size; 3430 } 3431 3432 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3433 u16 *alloc_tqps, u16 *max_rss_size) 3434 { 3435 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3436 3437 *alloc_tqps = hdev->num_tqps; 3438 *max_rss_size = hdev->rss_size_max; 3439 } 3440 3441 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3442 u32 new_tqps_num) 3443 { 3444 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3445 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3446 u16 max_rss_size; 3447 3448 kinfo->req_rss_size = new_tqps_num; 3449 3450 max_rss_size = min_t(u16, hdev->rss_size_max, 3451 hdev->num_tqps / kinfo->tc_info.num_tc); 3452 3453 /* Use the user's configuration when it is not larger than 3454 * max_rss_size, otherwise, use the maximum specification value. 3455 */ 3456 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 3457 kinfo->req_rss_size <= max_rss_size) 3458 kinfo->rss_size = kinfo->req_rss_size; 3459 else if (kinfo->rss_size > max_rss_size || 3460 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3461 kinfo->rss_size = max_rss_size; 3462 3463 kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 3464 } 3465 3466 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 3467 bool rxfh_configured) 3468 { 3469 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3470 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3471 u16 cur_rss_size = kinfo->rss_size; 3472 u16 cur_tqps = kinfo->num_tqps; 3473 u32 *rss_indir; 3474 unsigned int i; 3475 int ret; 3476 3477 hclgevf_update_rss_size(handle, new_tqps_num); 3478 3479 ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 3480 if (ret) 3481 return ret; 3482 3483 /* RSS indirection table has been configuared by user */ 3484 if (rxfh_configured) 3485 goto out; 3486 3487 /* Reinitializes the rss indirect table according to the new RSS size */ 3488 rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 3489 sizeof(u32), GFP_KERNEL); 3490 if (!rss_indir) 3491 return -ENOMEM; 3492 3493 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 3494 rss_indir[i] = i % kinfo->rss_size; 3495 3496 hdev->rss_cfg.rss_size = kinfo->rss_size; 3497 3498 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 3499 if (ret) 3500 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 3501 ret); 3502 3503 kfree(rss_indir); 3504 3505 out: 3506 if (!ret) 3507 dev_info(&hdev->pdev->dev, 3508 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 3509 cur_rss_size, kinfo->rss_size, 3510 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 3511 3512 return ret; 3513 } 3514 3515 static int hclgevf_get_status(struct hnae3_handle *handle) 3516 { 3517 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3518 3519 return hdev->hw.mac.link; 3520 } 3521 3522 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3523 u8 *auto_neg, u32 *speed, 3524 u8 *duplex) 3525 { 3526 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3527 3528 if (speed) 3529 *speed = hdev->hw.mac.speed; 3530 if (duplex) 3531 *duplex = hdev->hw.mac.duplex; 3532 if (auto_neg) 3533 *auto_neg = AUTONEG_DISABLE; 3534 } 3535 3536 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3537 u8 duplex) 3538 { 3539 hdev->hw.mac.speed = speed; 3540 hdev->hw.mac.duplex = duplex; 3541 } 3542 3543 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3544 { 3545 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3546 3547 return hclgevf_config_gro(hdev, enable); 3548 } 3549 3550 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3551 u8 *module_type) 3552 { 3553 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3554 3555 if (media_type) 3556 *media_type = hdev->hw.mac.media_type; 3557 3558 if (module_type) 3559 *module_type = hdev->hw.mac.module_type; 3560 } 3561 3562 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3563 { 3564 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3565 3566 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3567 } 3568 3569 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3570 { 3571 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3572 3573 return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 3574 } 3575 3576 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3577 { 3578 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3579 3580 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3581 } 3582 3583 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3584 { 3585 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3586 3587 return hdev->rst_stats.hw_rst_done_cnt; 3588 } 3589 3590 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3591 unsigned long *supported, 3592 unsigned long *advertising) 3593 { 3594 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3595 3596 *supported = hdev->hw.mac.supported; 3597 *advertising = hdev->hw.mac.advertising; 3598 } 3599 3600 #define MAX_SEPARATE_NUM 4 3601 #define SEPARATOR_VALUE 0xFFFFFFFF 3602 #define REG_NUM_PER_LINE 4 3603 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 3604 3605 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 3606 { 3607 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 3608 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3609 3610 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 3611 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 3612 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 3613 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 3614 3615 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 3616 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 3617 } 3618 3619 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 3620 void *data) 3621 { 3622 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3623 int i, j, reg_um, separator_num; 3624 u32 *reg = data; 3625 3626 *version = hdev->fw_version; 3627 3628 /* fetching per-VF registers values from VF PCIe register space */ 3629 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 3630 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3631 for (i = 0; i < reg_um; i++) 3632 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 3633 for (i = 0; i < separator_num; i++) 3634 *reg++ = SEPARATOR_VALUE; 3635 3636 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 3637 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3638 for (i = 0; i < reg_um; i++) 3639 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 3640 for (i = 0; i < separator_num; i++) 3641 *reg++ = SEPARATOR_VALUE; 3642 3643 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 3644 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3645 for (j = 0; j < hdev->num_tqps; j++) { 3646 for (i = 0; i < reg_um; i++) 3647 *reg++ = hclgevf_read_dev(&hdev->hw, 3648 ring_reg_addr_list[i] + 3649 0x200 * j); 3650 for (i = 0; i < separator_num; i++) 3651 *reg++ = SEPARATOR_VALUE; 3652 } 3653 3654 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 3655 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3656 for (j = 0; j < hdev->num_msi_used - 1; j++) { 3657 for (i = 0; i < reg_um; i++) 3658 *reg++ = hclgevf_read_dev(&hdev->hw, 3659 tqp_intr_reg_addr_list[i] + 3660 4 * j); 3661 for (i = 0; i < separator_num; i++) 3662 *reg++ = SEPARATOR_VALUE; 3663 } 3664 } 3665 3666 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 3667 u8 *port_base_vlan_info, u8 data_size) 3668 { 3669 struct hnae3_handle *nic = &hdev->nic; 3670 struct hclge_vf_to_pf_msg send_msg; 3671 int ret; 3672 3673 rtnl_lock(); 3674 3675 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 3676 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { 3677 dev_warn(&hdev->pdev->dev, 3678 "is resetting when updating port based vlan info\n"); 3679 rtnl_unlock(); 3680 return; 3681 } 3682 3683 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3684 if (ret) { 3685 rtnl_unlock(); 3686 return; 3687 } 3688 3689 /* send msg to PF and wait update port based vlan info */ 3690 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3691 HCLGE_MBX_PORT_BASE_VLAN_CFG); 3692 memcpy(send_msg.data, port_base_vlan_info, data_size); 3693 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3694 if (!ret) { 3695 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3696 nic->port_base_vlan_state = state; 3697 else 3698 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3699 } 3700 3701 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 3702 rtnl_unlock(); 3703 } 3704 3705 static const struct hnae3_ae_ops hclgevf_ops = { 3706 .init_ae_dev = hclgevf_init_ae_dev, 3707 .uninit_ae_dev = hclgevf_uninit_ae_dev, 3708 .flr_prepare = hclgevf_flr_prepare, 3709 .flr_done = hclgevf_flr_done, 3710 .init_client_instance = hclgevf_init_client_instance, 3711 .uninit_client_instance = hclgevf_uninit_client_instance, 3712 .start = hclgevf_ae_start, 3713 .stop = hclgevf_ae_stop, 3714 .client_start = hclgevf_client_start, 3715 .client_stop = hclgevf_client_stop, 3716 .map_ring_to_vector = hclgevf_map_ring_to_vector, 3717 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3718 .get_vector = hclgevf_get_vector, 3719 .put_vector = hclgevf_put_vector, 3720 .reset_queue = hclgevf_reset_tqp, 3721 .get_mac_addr = hclgevf_get_mac_addr, 3722 .set_mac_addr = hclgevf_set_mac_addr, 3723 .add_uc_addr = hclgevf_add_uc_addr, 3724 .rm_uc_addr = hclgevf_rm_uc_addr, 3725 .add_mc_addr = hclgevf_add_mc_addr, 3726 .rm_mc_addr = hclgevf_rm_mc_addr, 3727 .get_stats = hclgevf_get_stats, 3728 .update_stats = hclgevf_update_stats, 3729 .get_strings = hclgevf_get_strings, 3730 .get_sset_count = hclgevf_get_sset_count, 3731 .get_rss_key_size = hclgevf_get_rss_key_size, 3732 .get_rss = hclgevf_get_rss, 3733 .set_rss = hclgevf_set_rss, 3734 .get_rss_tuple = hclgevf_get_rss_tuple, 3735 .set_rss_tuple = hclgevf_set_rss_tuple, 3736 .get_tc_size = hclgevf_get_tc_size, 3737 .get_fw_version = hclgevf_get_fw_version, 3738 .set_vlan_filter = hclgevf_set_vlan_filter, 3739 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3740 .reset_event = hclgevf_reset_event, 3741 .set_default_reset_request = hclgevf_set_def_reset_request, 3742 .set_channels = hclgevf_set_channels, 3743 .get_channels = hclgevf_get_channels, 3744 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3745 .get_regs_len = hclgevf_get_regs_len, 3746 .get_regs = hclgevf_get_regs, 3747 .get_status = hclgevf_get_status, 3748 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3749 .get_media_type = hclgevf_get_media_type, 3750 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 3751 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3752 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 3753 .set_gro_en = hclgevf_gro_en, 3754 .set_mtu = hclgevf_set_mtu, 3755 .get_global_queue_id = hclgevf_get_qid_global, 3756 .set_timer_task = hclgevf_set_timer_task, 3757 .get_link_mode = hclgevf_get_link_mode, 3758 .set_promisc_mode = hclgevf_set_promisc_mode, 3759 .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3760 .get_cmdq_stat = hclgevf_get_cmdq_stat, 3761 }; 3762 3763 static struct hnae3_ae_algo ae_algovf = { 3764 .ops = &hclgevf_ops, 3765 .pdev_id_table = ae_algovf_pci_tbl, 3766 }; 3767 3768 static int hclgevf_init(void) 3769 { 3770 pr_info("%s is initializing\n", HCLGEVF_NAME); 3771 3772 hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); 3773 if (!hclgevf_wq) { 3774 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3775 return -ENOMEM; 3776 } 3777 3778 hnae3_register_ae_algo(&ae_algovf); 3779 3780 return 0; 3781 } 3782 3783 static void hclgevf_exit(void) 3784 { 3785 hnae3_unregister_ae_algo(&ae_algovf); 3786 destroy_workqueue(hclgevf_wq); 3787 } 3788 module_init(hclgevf_init); 3789 module_exit(hclgevf_exit); 3790 3791 MODULE_LICENSE("GPL"); 3792 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3793 MODULE_DESCRIPTION("HCLGEVF Driver"); 3794 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3795