1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 #define HCLGEVF_RESET_MAX_FAIL_CNT 5 15 16 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 17 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 18 unsigned long delay); 19 20 static struct hnae3_ae_algo ae_algovf; 21 22 static struct workqueue_struct *hclgevf_wq; 23 24 static const struct pci_device_id ae_algovf_pci_tbl[] = { 25 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 26 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 27 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 28 /* required last entry */ 29 {0, } 30 }; 31 32 static const u8 hclgevf_hash_key[] = { 33 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 34 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 35 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 36 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 37 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 38 }; 39 40 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 41 42 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 43 HCLGEVF_CMDQ_TX_ADDR_H_REG, 44 HCLGEVF_CMDQ_TX_DEPTH_REG, 45 HCLGEVF_CMDQ_TX_TAIL_REG, 46 HCLGEVF_CMDQ_TX_HEAD_REG, 47 HCLGEVF_CMDQ_RX_ADDR_L_REG, 48 HCLGEVF_CMDQ_RX_ADDR_H_REG, 49 HCLGEVF_CMDQ_RX_DEPTH_REG, 50 HCLGEVF_CMDQ_RX_TAIL_REG, 51 HCLGEVF_CMDQ_RX_HEAD_REG, 52 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 53 HCLGEVF_VECTOR0_CMDQ_STATE_REG, 54 HCLGEVF_CMDQ_INTR_EN_REG, 55 HCLGEVF_CMDQ_INTR_GEN_REG}; 56 57 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 58 HCLGEVF_RST_ING, 59 HCLGEVF_GRO_EN_REG}; 60 61 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 62 HCLGEVF_RING_RX_ADDR_H_REG, 63 HCLGEVF_RING_RX_BD_NUM_REG, 64 HCLGEVF_RING_RX_BD_LENGTH_REG, 65 HCLGEVF_RING_RX_MERGE_EN_REG, 66 HCLGEVF_RING_RX_TAIL_REG, 67 HCLGEVF_RING_RX_HEAD_REG, 68 HCLGEVF_RING_RX_FBD_NUM_REG, 69 HCLGEVF_RING_RX_OFFSET_REG, 70 HCLGEVF_RING_RX_FBD_OFFSET_REG, 71 HCLGEVF_RING_RX_STASH_REG, 72 HCLGEVF_RING_RX_BD_ERR_REG, 73 HCLGEVF_RING_TX_ADDR_L_REG, 74 HCLGEVF_RING_TX_ADDR_H_REG, 75 HCLGEVF_RING_TX_BD_NUM_REG, 76 HCLGEVF_RING_TX_PRIORITY_REG, 77 HCLGEVF_RING_TX_TC_REG, 78 HCLGEVF_RING_TX_MERGE_EN_REG, 79 HCLGEVF_RING_TX_TAIL_REG, 80 HCLGEVF_RING_TX_HEAD_REG, 81 HCLGEVF_RING_TX_FBD_NUM_REG, 82 HCLGEVF_RING_TX_OFFSET_REG, 83 HCLGEVF_RING_TX_EBD_NUM_REG, 84 HCLGEVF_RING_TX_EBD_OFFSET_REG, 85 HCLGEVF_RING_TX_BD_ERR_REG, 86 HCLGEVF_RING_EN_REG}; 87 88 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 89 HCLGEVF_TQP_INTR_GL0_REG, 90 HCLGEVF_TQP_INTR_GL1_REG, 91 HCLGEVF_TQP_INTR_GL2_REG, 92 HCLGEVF_TQP_INTR_RL_REG}; 93 94 static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 95 { 96 if (!handle->client) 97 return container_of(handle, struct hclgevf_dev, nic); 98 else if (handle->client->type == HNAE3_CLIENT_ROCE) 99 return container_of(handle, struct hclgevf_dev, roce); 100 else 101 return container_of(handle, struct hclgevf_dev, nic); 102 } 103 104 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 105 { 106 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 107 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 108 struct hclgevf_desc desc; 109 struct hclgevf_tqp *tqp; 110 int status; 111 int i; 112 113 for (i = 0; i < kinfo->num_tqps; i++) { 114 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 115 hclgevf_cmd_setup_basic_desc(&desc, 116 HCLGEVF_OPC_QUERY_RX_STATUS, 117 true); 118 119 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 120 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 121 if (status) { 122 dev_err(&hdev->pdev->dev, 123 "Query tqp stat fail, status = %d,queue = %d\n", 124 status, i); 125 return status; 126 } 127 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 128 le32_to_cpu(desc.data[1]); 129 130 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 131 true); 132 133 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 134 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 135 if (status) { 136 dev_err(&hdev->pdev->dev, 137 "Query tqp stat fail, status = %d,queue = %d\n", 138 status, i); 139 return status; 140 } 141 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 142 le32_to_cpu(desc.data[1]); 143 } 144 145 return 0; 146 } 147 148 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 149 { 150 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 151 struct hclgevf_tqp *tqp; 152 u64 *buff = data; 153 int i; 154 155 for (i = 0; i < kinfo->num_tqps; i++) { 156 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 157 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 158 } 159 for (i = 0; i < kinfo->num_tqps; i++) { 160 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 161 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 162 } 163 164 return buff; 165 } 166 167 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 168 { 169 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 170 171 return kinfo->num_tqps * 2; 172 } 173 174 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 175 { 176 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 177 u8 *buff = data; 178 int i; 179 180 for (i = 0; i < kinfo->num_tqps; i++) { 181 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 182 struct hclgevf_tqp, q); 183 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", 184 tqp->index); 185 buff += ETH_GSTRING_LEN; 186 } 187 188 for (i = 0; i < kinfo->num_tqps; i++) { 189 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 190 struct hclgevf_tqp, q); 191 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", 192 tqp->index); 193 buff += ETH_GSTRING_LEN; 194 } 195 196 return buff; 197 } 198 199 static void hclgevf_update_stats(struct hnae3_handle *handle, 200 struct net_device_stats *net_stats) 201 { 202 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 203 int status; 204 205 status = hclgevf_tqps_update_stats(handle); 206 if (status) 207 dev_err(&hdev->pdev->dev, 208 "VF update of TQPS stats fail, status = %d.\n", 209 status); 210 } 211 212 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 213 { 214 if (strset == ETH_SS_TEST) 215 return -EOPNOTSUPP; 216 else if (strset == ETH_SS_STATS) 217 return hclgevf_tqps_get_sset_count(handle, strset); 218 219 return 0; 220 } 221 222 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 223 u8 *data) 224 { 225 u8 *p = (char *)data; 226 227 if (strset == ETH_SS_STATS) 228 p = hclgevf_tqps_get_strings(handle, p); 229 } 230 231 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 232 { 233 hclgevf_tqps_get_stats(handle, data); 234 } 235 236 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, 237 u8 subcode) 238 { 239 if (msg) { 240 memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); 241 msg->code = code; 242 msg->subcode = subcode; 243 } 244 } 245 246 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) 247 { 248 struct hclge_vf_to_pf_msg send_msg; 249 u8 resp_msg; 250 int status; 251 252 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0); 253 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 254 sizeof(resp_msg)); 255 if (status) { 256 dev_err(&hdev->pdev->dev, 257 "VF request to get TC info from PF failed %d", 258 status); 259 return status; 260 } 261 262 hdev->hw_tc_map = resp_msg; 263 264 return 0; 265 } 266 267 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 268 { 269 struct hnae3_handle *nic = &hdev->nic; 270 struct hclge_vf_to_pf_msg send_msg; 271 u8 resp_msg; 272 int ret; 273 274 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 275 HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); 276 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 277 sizeof(u8)); 278 if (ret) { 279 dev_err(&hdev->pdev->dev, 280 "VF request to get port based vlan state failed %d", 281 ret); 282 return ret; 283 } 284 285 nic->port_base_vlan_state = resp_msg; 286 287 return 0; 288 } 289 290 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 291 { 292 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 293 #define HCLGEVF_TQPS_ALLOC_OFFSET 0 294 #define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 295 #define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 296 297 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 298 struct hclge_vf_to_pf_msg send_msg; 299 int status; 300 301 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); 302 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 303 HCLGEVF_TQPS_RSS_INFO_LEN); 304 if (status) { 305 dev_err(&hdev->pdev->dev, 306 "VF request to get tqp info from PF failed %d", 307 status); 308 return status; 309 } 310 311 memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], 312 sizeof(u16)); 313 memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], 314 sizeof(u16)); 315 memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], 316 sizeof(u16)); 317 318 return 0; 319 } 320 321 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 322 { 323 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 324 #define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 325 #define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 326 327 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 328 struct hclge_vf_to_pf_msg send_msg; 329 int ret; 330 331 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); 332 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 333 HCLGEVF_TQPS_DEPTH_INFO_LEN); 334 if (ret) { 335 dev_err(&hdev->pdev->dev, 336 "VF request to get tqp depth info from PF failed %d", 337 ret); 338 return ret; 339 } 340 341 memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], 342 sizeof(u16)); 343 memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], 344 sizeof(u16)); 345 346 return 0; 347 } 348 349 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 350 { 351 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 352 struct hclge_vf_to_pf_msg send_msg; 353 u16 qid_in_pf = 0; 354 u8 resp_data[2]; 355 int ret; 356 357 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); 358 memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 359 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, 360 sizeof(resp_data)); 361 if (!ret) 362 qid_in_pf = *(u16 *)resp_data; 363 364 return qid_in_pf; 365 } 366 367 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 368 { 369 struct hclge_vf_to_pf_msg send_msg; 370 u8 resp_msg[2]; 371 int ret; 372 373 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); 374 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 375 sizeof(resp_msg)); 376 if (ret) { 377 dev_err(&hdev->pdev->dev, 378 "VF request to get the pf port media type failed %d", 379 ret); 380 return ret; 381 } 382 383 hdev->hw.mac.media_type = resp_msg[0]; 384 hdev->hw.mac.module_type = resp_msg[1]; 385 386 return 0; 387 } 388 389 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 390 { 391 struct hclgevf_tqp *tqp; 392 int i; 393 394 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 395 sizeof(struct hclgevf_tqp), GFP_KERNEL); 396 if (!hdev->htqp) 397 return -ENOMEM; 398 399 tqp = hdev->htqp; 400 401 for (i = 0; i < hdev->num_tqps; i++) { 402 tqp->dev = &hdev->pdev->dev; 403 tqp->index = i; 404 405 tqp->q.ae_algo = &ae_algovf; 406 tqp->q.buf_size = hdev->rx_buf_len; 407 tqp->q.tx_desc_num = hdev->num_tx_desc; 408 tqp->q.rx_desc_num = hdev->num_rx_desc; 409 410 /* need an extended offset to configure queues >= 411 * HCLGEVF_TQP_MAX_SIZE_DEV_V2. 412 */ 413 if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) 414 tqp->q.io_base = hdev->hw.io_base + 415 HCLGEVF_TQP_REG_OFFSET + 416 i * HCLGEVF_TQP_REG_SIZE; 417 else 418 tqp->q.io_base = hdev->hw.io_base + 419 HCLGEVF_TQP_REG_OFFSET + 420 HCLGEVF_TQP_EXT_REG_OFFSET + 421 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * 422 HCLGEVF_TQP_REG_SIZE; 423 424 tqp++; 425 } 426 427 return 0; 428 } 429 430 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 431 { 432 struct hnae3_handle *nic = &hdev->nic; 433 struct hnae3_knic_private_info *kinfo; 434 u16 new_tqps = hdev->num_tqps; 435 unsigned int i; 436 u8 num_tc = 0; 437 438 kinfo = &nic->kinfo; 439 kinfo->num_tx_desc = hdev->num_tx_desc; 440 kinfo->num_rx_desc = hdev->num_rx_desc; 441 kinfo->rx_buf_len = hdev->rx_buf_len; 442 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 443 if (hdev->hw_tc_map & BIT(i)) 444 num_tc++; 445 446 num_tc = num_tc ? num_tc : 1; 447 kinfo->tc_info.num_tc = num_tc; 448 kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); 449 new_tqps = kinfo->rss_size * num_tc; 450 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 451 452 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 453 sizeof(struct hnae3_queue *), GFP_KERNEL); 454 if (!kinfo->tqp) 455 return -ENOMEM; 456 457 for (i = 0; i < kinfo->num_tqps; i++) { 458 hdev->htqp[i].q.handle = &hdev->nic; 459 hdev->htqp[i].q.tqp_index = i; 460 kinfo->tqp[i] = &hdev->htqp[i].q; 461 } 462 463 /* after init the max rss_size and tqps, adjust the default tqp numbers 464 * and rss size with the actual vector numbers 465 */ 466 kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 467 kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, 468 kinfo->rss_size); 469 470 return 0; 471 } 472 473 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 474 { 475 struct hclge_vf_to_pf_msg send_msg; 476 int status; 477 478 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); 479 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 480 if (status) 481 dev_err(&hdev->pdev->dev, 482 "VF failed to fetch link status(%d) from PF", status); 483 } 484 485 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 486 { 487 struct hnae3_handle *rhandle = &hdev->roce; 488 struct hnae3_handle *handle = &hdev->nic; 489 struct hnae3_client *rclient; 490 struct hnae3_client *client; 491 492 if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 493 return; 494 495 client = handle->client; 496 rclient = hdev->roce_client; 497 498 link_state = 499 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 500 if (link_state != hdev->hw.mac.link) { 501 client->ops->link_status_change(handle, !!link_state); 502 if (rclient && rclient->ops->link_status_change) 503 rclient->ops->link_status_change(rhandle, !!link_state); 504 hdev->hw.mac.link = link_state; 505 } 506 507 clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 508 } 509 510 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 511 { 512 #define HCLGEVF_ADVERTISING 0 513 #define HCLGEVF_SUPPORTED 1 514 515 struct hclge_vf_to_pf_msg send_msg; 516 517 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 518 send_msg.data[0] = HCLGEVF_ADVERTISING; 519 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 520 send_msg.data[0] = HCLGEVF_SUPPORTED; 521 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 522 } 523 524 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 525 { 526 struct hnae3_handle *nic = &hdev->nic; 527 int ret; 528 529 nic->ae_algo = &ae_algovf; 530 nic->pdev = hdev->pdev; 531 nic->numa_node_mask = hdev->numa_node_mask; 532 nic->flags |= HNAE3_SUPPORT_VF; 533 534 ret = hclgevf_knic_setup(hdev); 535 if (ret) 536 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 537 ret); 538 return ret; 539 } 540 541 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 542 { 543 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 544 dev_warn(&hdev->pdev->dev, 545 "vector(vector_id %d) has been freed.\n", vector_id); 546 return; 547 } 548 549 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 550 hdev->num_msi_left += 1; 551 hdev->num_msi_used -= 1; 552 } 553 554 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 555 struct hnae3_vector_info *vector_info) 556 { 557 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 558 struct hnae3_vector_info *vector = vector_info; 559 int alloc = 0; 560 int i, j; 561 562 vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 563 vector_num = min(hdev->num_msi_left, vector_num); 564 565 for (j = 0; j < vector_num; j++) { 566 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 567 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 568 vector->vector = pci_irq_vector(hdev->pdev, i); 569 vector->io_addr = hdev->hw.io_base + 570 HCLGEVF_VECTOR_REG_BASE + 571 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 572 hdev->vector_status[i] = 0; 573 hdev->vector_irq[i] = vector->vector; 574 575 vector++; 576 alloc++; 577 578 break; 579 } 580 } 581 } 582 hdev->num_msi_left -= alloc; 583 hdev->num_msi_used += alloc; 584 585 return alloc; 586 } 587 588 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 589 { 590 int i; 591 592 for (i = 0; i < hdev->num_msi; i++) 593 if (vector == hdev->vector_irq[i]) 594 return i; 595 596 return -EINVAL; 597 } 598 599 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 600 const u8 hfunc, const u8 *key) 601 { 602 struct hclgevf_rss_config_cmd *req; 603 unsigned int key_offset = 0; 604 struct hclgevf_desc desc; 605 int key_counts; 606 int key_size; 607 int ret; 608 609 key_counts = HCLGEVF_RSS_KEY_SIZE; 610 req = (struct hclgevf_rss_config_cmd *)desc.data; 611 612 while (key_counts) { 613 hclgevf_cmd_setup_basic_desc(&desc, 614 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 615 false); 616 617 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 618 req->hash_config |= 619 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 620 621 key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); 622 memcpy(req->hash_key, 623 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 624 625 key_counts -= key_size; 626 key_offset++; 627 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 628 if (ret) { 629 dev_err(&hdev->pdev->dev, 630 "Configure RSS config fail, status = %d\n", 631 ret); 632 return ret; 633 } 634 } 635 636 return 0; 637 } 638 639 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 640 { 641 return HCLGEVF_RSS_KEY_SIZE; 642 } 643 644 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 645 { 646 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 647 struct hclgevf_rss_indirection_table_cmd *req; 648 struct hclgevf_desc desc; 649 int rss_cfg_tbl_num; 650 int status; 651 int i, j; 652 653 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 654 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size / 655 HCLGEVF_RSS_CFG_TBL_SIZE; 656 657 for (i = 0; i < rss_cfg_tbl_num; i++) { 658 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 659 false); 660 req->start_table_index = 661 cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE); 662 req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK); 663 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 664 req->rss_result[j] = 665 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 666 667 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 668 if (status) { 669 dev_err(&hdev->pdev->dev, 670 "VF failed(=%d) to set RSS indirection table\n", 671 status); 672 return status; 673 } 674 } 675 676 return 0; 677 } 678 679 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 680 { 681 struct hclgevf_rss_tc_mode_cmd *req; 682 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 683 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 684 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 685 struct hclgevf_desc desc; 686 u16 roundup_size; 687 unsigned int i; 688 int status; 689 690 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 691 692 roundup_size = roundup_pow_of_two(rss_size); 693 roundup_size = ilog2(roundup_size); 694 695 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 696 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 697 tc_size[i] = roundup_size; 698 tc_offset[i] = rss_size * i; 699 } 700 701 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 702 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 703 u16 mode = 0; 704 705 hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B, 706 (tc_valid[i] & 0x1)); 707 hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M, 708 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 709 hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M, 710 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 711 712 req->rss_tc_mode[i] = cpu_to_le16(mode); 713 } 714 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 715 if (status) 716 dev_err(&hdev->pdev->dev, 717 "VF failed(=%d) to set rss tc mode\n", status); 718 719 return status; 720 } 721 722 /* for revision 0x20, vf shared the same rss config with pf */ 723 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 724 { 725 #define HCLGEVF_RSS_MBX_RESP_LEN 8 726 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 727 u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 728 struct hclge_vf_to_pf_msg send_msg; 729 u16 msg_num, hash_key_index; 730 u8 index; 731 int ret; 732 733 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 734 msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 735 HCLGEVF_RSS_MBX_RESP_LEN; 736 for (index = 0; index < msg_num; index++) { 737 send_msg.data[0] = index; 738 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 739 HCLGEVF_RSS_MBX_RESP_LEN); 740 if (ret) { 741 dev_err(&hdev->pdev->dev, 742 "VF get rss hash key from PF failed, ret=%d", 743 ret); 744 return ret; 745 } 746 747 hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 748 if (index == msg_num - 1) 749 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 750 &resp_msg[0], 751 HCLGEVF_RSS_KEY_SIZE - hash_key_index); 752 else 753 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 754 &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 755 } 756 757 return 0; 758 } 759 760 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 761 u8 *hfunc) 762 { 763 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 764 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 765 int i, ret; 766 767 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 768 /* Get hash algorithm */ 769 if (hfunc) { 770 switch (rss_cfg->hash_algo) { 771 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 772 *hfunc = ETH_RSS_HASH_TOP; 773 break; 774 case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 775 *hfunc = ETH_RSS_HASH_XOR; 776 break; 777 default: 778 *hfunc = ETH_RSS_HASH_UNKNOWN; 779 break; 780 } 781 } 782 783 /* Get the RSS Key required by the user */ 784 if (key) 785 memcpy(key, rss_cfg->rss_hash_key, 786 HCLGEVF_RSS_KEY_SIZE); 787 } else { 788 if (hfunc) 789 *hfunc = ETH_RSS_HASH_TOP; 790 if (key) { 791 ret = hclgevf_get_rss_hash_key(hdev); 792 if (ret) 793 return ret; 794 memcpy(key, rss_cfg->rss_hash_key, 795 HCLGEVF_RSS_KEY_SIZE); 796 } 797 } 798 799 if (indir) 800 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 801 indir[i] = rss_cfg->rss_indirection_tbl[i]; 802 803 return 0; 804 } 805 806 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 807 const u8 *key, const u8 hfunc) 808 { 809 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 810 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 811 int ret, i; 812 813 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 814 /* Set the RSS Hash Key if specififed by the user */ 815 if (key) { 816 switch (hfunc) { 817 case ETH_RSS_HASH_TOP: 818 rss_cfg->hash_algo = 819 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 820 break; 821 case ETH_RSS_HASH_XOR: 822 rss_cfg->hash_algo = 823 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 824 break; 825 case ETH_RSS_HASH_NO_CHANGE: 826 break; 827 default: 828 return -EINVAL; 829 } 830 831 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 832 key); 833 if (ret) 834 return ret; 835 836 /* Update the shadow RSS key with user specified qids */ 837 memcpy(rss_cfg->rss_hash_key, key, 838 HCLGEVF_RSS_KEY_SIZE); 839 } 840 } 841 842 /* update the shadow RSS table with user specified qids */ 843 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 844 rss_cfg->rss_indirection_tbl[i] = indir[i]; 845 846 /* update the hardware */ 847 return hclgevf_set_rss_indir_table(hdev); 848 } 849 850 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 851 { 852 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 853 854 if (nfc->data & RXH_L4_B_2_3) 855 hash_sets |= HCLGEVF_D_PORT_BIT; 856 else 857 hash_sets &= ~HCLGEVF_D_PORT_BIT; 858 859 if (nfc->data & RXH_IP_SRC) 860 hash_sets |= HCLGEVF_S_IP_BIT; 861 else 862 hash_sets &= ~HCLGEVF_S_IP_BIT; 863 864 if (nfc->data & RXH_IP_DST) 865 hash_sets |= HCLGEVF_D_IP_BIT; 866 else 867 hash_sets &= ~HCLGEVF_D_IP_BIT; 868 869 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 870 hash_sets |= HCLGEVF_V_TAG_BIT; 871 872 return hash_sets; 873 } 874 875 static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle, 876 struct ethtool_rxnfc *nfc, 877 struct hclgevf_rss_input_tuple_cmd *req) 878 { 879 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 880 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 881 u8 tuple_sets; 882 883 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 884 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 885 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 886 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 887 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 888 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 889 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 890 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 891 892 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 893 switch (nfc->flow_type) { 894 case TCP_V4_FLOW: 895 req->ipv4_tcp_en = tuple_sets; 896 break; 897 case TCP_V6_FLOW: 898 req->ipv6_tcp_en = tuple_sets; 899 break; 900 case UDP_V4_FLOW: 901 req->ipv4_udp_en = tuple_sets; 902 break; 903 case UDP_V6_FLOW: 904 req->ipv6_udp_en = tuple_sets; 905 break; 906 case SCTP_V4_FLOW: 907 req->ipv4_sctp_en = tuple_sets; 908 break; 909 case SCTP_V6_FLOW: 910 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && 911 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))) 912 return -EINVAL; 913 914 req->ipv6_sctp_en = tuple_sets; 915 break; 916 case IPV4_FLOW: 917 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 918 break; 919 case IPV6_FLOW: 920 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 921 break; 922 default: 923 return -EINVAL; 924 } 925 926 return 0; 927 } 928 929 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 930 struct ethtool_rxnfc *nfc) 931 { 932 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 933 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 934 struct hclgevf_rss_input_tuple_cmd *req; 935 struct hclgevf_desc desc; 936 int ret; 937 938 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 939 return -EOPNOTSUPP; 940 941 if (nfc->data & 942 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 943 return -EINVAL; 944 945 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 946 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 947 948 ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req); 949 if (ret) { 950 dev_err(&hdev->pdev->dev, 951 "failed to init rss tuple cmd, ret = %d\n", ret); 952 return ret; 953 } 954 955 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 956 if (ret) { 957 dev_err(&hdev->pdev->dev, 958 "Set rss tuple fail, status = %d\n", ret); 959 return ret; 960 } 961 962 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 963 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 964 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 965 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 966 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 967 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 968 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 969 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 970 return 0; 971 } 972 973 static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev, 974 int flow_type, u8 *tuple_sets) 975 { 976 switch (flow_type) { 977 case TCP_V4_FLOW: 978 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en; 979 break; 980 case UDP_V4_FLOW: 981 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en; 982 break; 983 case TCP_V6_FLOW: 984 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en; 985 break; 986 case UDP_V6_FLOW: 987 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en; 988 break; 989 case SCTP_V4_FLOW: 990 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en; 991 break; 992 case SCTP_V6_FLOW: 993 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en; 994 break; 995 case IPV4_FLOW: 996 case IPV6_FLOW: 997 *tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 998 break; 999 default: 1000 return -EINVAL; 1001 } 1002 1003 return 0; 1004 } 1005 1006 static u64 hclgevf_convert_rss_tuple(u8 tuple_sets) 1007 { 1008 u64 tuple_data = 0; 1009 1010 if (tuple_sets & HCLGEVF_D_PORT_BIT) 1011 tuple_data |= RXH_L4_B_2_3; 1012 if (tuple_sets & HCLGEVF_S_PORT_BIT) 1013 tuple_data |= RXH_L4_B_0_1; 1014 if (tuple_sets & HCLGEVF_D_IP_BIT) 1015 tuple_data |= RXH_IP_DST; 1016 if (tuple_sets & HCLGEVF_S_IP_BIT) 1017 tuple_data |= RXH_IP_SRC; 1018 1019 return tuple_data; 1020 } 1021 1022 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 1023 struct ethtool_rxnfc *nfc) 1024 { 1025 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1026 u8 tuple_sets; 1027 int ret; 1028 1029 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 1030 return -EOPNOTSUPP; 1031 1032 nfc->data = 0; 1033 1034 ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type, 1035 &tuple_sets); 1036 if (ret || !tuple_sets) 1037 return ret; 1038 1039 nfc->data = hclgevf_convert_rss_tuple(tuple_sets); 1040 1041 return 0; 1042 } 1043 1044 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 1045 struct hclgevf_rss_cfg *rss_cfg) 1046 { 1047 struct hclgevf_rss_input_tuple_cmd *req; 1048 struct hclgevf_desc desc; 1049 int ret; 1050 1051 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 1052 1053 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 1054 1055 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 1056 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 1057 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 1058 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 1059 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 1060 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 1061 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 1062 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 1063 1064 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1065 if (ret) 1066 dev_err(&hdev->pdev->dev, 1067 "Configure rss input fail, status = %d\n", ret); 1068 return ret; 1069 } 1070 1071 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 1072 { 1073 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1074 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1075 1076 return rss_cfg->rss_size; 1077 } 1078 1079 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 1080 int vector_id, 1081 struct hnae3_ring_chain_node *ring_chain) 1082 { 1083 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1084 struct hclge_vf_to_pf_msg send_msg; 1085 struct hnae3_ring_chain_node *node; 1086 int status; 1087 int i = 0; 1088 1089 memset(&send_msg, 0, sizeof(send_msg)); 1090 send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 1091 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 1092 send_msg.vector_id = vector_id; 1093 1094 for (node = ring_chain; node; node = node->next) { 1095 send_msg.param[i].ring_type = 1096 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 1097 1098 send_msg.param[i].tqp_index = node->tqp_index; 1099 send_msg.param[i].int_gl_index = 1100 hnae3_get_field(node->int_gl_idx, 1101 HNAE3_RING_GL_IDX_M, 1102 HNAE3_RING_GL_IDX_S); 1103 1104 i++; 1105 if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 1106 send_msg.ring_num = i; 1107 1108 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 1109 NULL, 0); 1110 if (status) { 1111 dev_err(&hdev->pdev->dev, 1112 "Map TQP fail, status is %d.\n", 1113 status); 1114 return status; 1115 } 1116 i = 0; 1117 } 1118 } 1119 1120 return 0; 1121 } 1122 1123 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1124 struct hnae3_ring_chain_node *ring_chain) 1125 { 1126 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1127 int vector_id; 1128 1129 vector_id = hclgevf_get_vector_index(hdev, vector); 1130 if (vector_id < 0) { 1131 dev_err(&handle->pdev->dev, 1132 "Get vector index fail. ret =%d\n", vector_id); 1133 return vector_id; 1134 } 1135 1136 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1137 } 1138 1139 static int hclgevf_unmap_ring_from_vector( 1140 struct hnae3_handle *handle, 1141 int vector, 1142 struct hnae3_ring_chain_node *ring_chain) 1143 { 1144 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1145 int ret, vector_id; 1146 1147 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1148 return 0; 1149 1150 vector_id = hclgevf_get_vector_index(hdev, vector); 1151 if (vector_id < 0) { 1152 dev_err(&handle->pdev->dev, 1153 "Get vector index fail. ret =%d\n", vector_id); 1154 return vector_id; 1155 } 1156 1157 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 1158 if (ret) 1159 dev_err(&handle->pdev->dev, 1160 "Unmap ring from vector fail. vector=%d, ret =%d\n", 1161 vector_id, 1162 ret); 1163 1164 return ret; 1165 } 1166 1167 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 1168 { 1169 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1170 int vector_id; 1171 1172 vector_id = hclgevf_get_vector_index(hdev, vector); 1173 if (vector_id < 0) { 1174 dev_err(&handle->pdev->dev, 1175 "hclgevf_put_vector get vector index fail. ret =%d\n", 1176 vector_id); 1177 return vector_id; 1178 } 1179 1180 hclgevf_free_vector(hdev, vector_id); 1181 1182 return 0; 1183 } 1184 1185 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1186 bool en_uc_pmc, bool en_mc_pmc, 1187 bool en_bc_pmc) 1188 { 1189 struct hnae3_handle *handle = &hdev->nic; 1190 struct hclge_vf_to_pf_msg send_msg; 1191 int ret; 1192 1193 memset(&send_msg, 0, sizeof(send_msg)); 1194 send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 1195 send_msg.en_bc = en_bc_pmc ? 1 : 0; 1196 send_msg.en_uc = en_uc_pmc ? 1 : 0; 1197 send_msg.en_mc = en_mc_pmc ? 1 : 0; 1198 send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC, 1199 &handle->priv_flags) ? 1 : 0; 1200 1201 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1202 if (ret) 1203 dev_err(&hdev->pdev->dev, 1204 "Set promisc mode fail, status is %d.\n", ret); 1205 1206 return ret; 1207 } 1208 1209 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 1210 bool en_mc_pmc) 1211 { 1212 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1213 bool en_bc_pmc; 1214 1215 en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; 1216 1217 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 1218 en_bc_pmc); 1219 } 1220 1221 static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 1222 { 1223 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1224 1225 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1226 hclgevf_task_schedule(hdev, 0); 1227 } 1228 1229 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 1230 { 1231 struct hnae3_handle *handle = &hdev->nic; 1232 bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 1233 bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 1234 int ret; 1235 1236 if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 1237 ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 1238 if (!ret) 1239 clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1240 } 1241 } 1242 1243 static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, 1244 u16 stream_id, bool enable) 1245 { 1246 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1247 struct hclgevf_desc desc; 1248 1249 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1250 1251 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1252 false); 1253 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1254 req->stream_id = cpu_to_le16(stream_id); 1255 if (enable) 1256 req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 1257 1258 return hclgevf_cmd_send(&hdev->hw, &desc, 1); 1259 } 1260 1261 static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable) 1262 { 1263 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1264 int ret; 1265 u16 i; 1266 1267 for (i = 0; i < handle->kinfo.num_tqps; i++) { 1268 ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable); 1269 if (ret) 1270 return ret; 1271 } 1272 1273 return 0; 1274 } 1275 1276 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1277 { 1278 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1279 struct hclgevf_tqp *tqp; 1280 int i; 1281 1282 for (i = 0; i < kinfo->num_tqps; i++) { 1283 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1284 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1285 } 1286 } 1287 1288 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 1289 { 1290 struct hclge_vf_to_pf_msg send_msg; 1291 u8 host_mac[ETH_ALEN]; 1292 int status; 1293 1294 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 1295 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 1296 ETH_ALEN); 1297 if (status) { 1298 dev_err(&hdev->pdev->dev, 1299 "fail to get VF MAC from host %d", status); 1300 return status; 1301 } 1302 1303 ether_addr_copy(p, host_mac); 1304 1305 return 0; 1306 } 1307 1308 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1309 { 1310 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1311 u8 host_mac_addr[ETH_ALEN]; 1312 1313 if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 1314 return; 1315 1316 hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 1317 if (hdev->has_pf_mac) 1318 ether_addr_copy(p, host_mac_addr); 1319 else 1320 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1321 } 1322 1323 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1324 bool is_first) 1325 { 1326 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1327 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1328 struct hclge_vf_to_pf_msg send_msg; 1329 u8 *new_mac_addr = (u8 *)p; 1330 int status; 1331 1332 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 1333 send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1334 ether_addr_copy(send_msg.data, new_mac_addr); 1335 if (is_first && !hdev->has_pf_mac) 1336 eth_zero_addr(&send_msg.data[ETH_ALEN]); 1337 else 1338 ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 1339 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1340 if (!status) 1341 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1342 1343 return status; 1344 } 1345 1346 static struct hclgevf_mac_addr_node * 1347 hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 1348 { 1349 struct hclgevf_mac_addr_node *mac_node, *tmp; 1350 1351 list_for_each_entry_safe(mac_node, tmp, list, node) 1352 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 1353 return mac_node; 1354 1355 return NULL; 1356 } 1357 1358 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 1359 enum HCLGEVF_MAC_NODE_STATE state) 1360 { 1361 switch (state) { 1362 /* from set_rx_mode or tmp_add_list */ 1363 case HCLGEVF_MAC_TO_ADD: 1364 if (mac_node->state == HCLGEVF_MAC_TO_DEL) 1365 mac_node->state = HCLGEVF_MAC_ACTIVE; 1366 break; 1367 /* only from set_rx_mode */ 1368 case HCLGEVF_MAC_TO_DEL: 1369 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1370 list_del(&mac_node->node); 1371 kfree(mac_node); 1372 } else { 1373 mac_node->state = HCLGEVF_MAC_TO_DEL; 1374 } 1375 break; 1376 /* only from tmp_add_list, the mac_node->state won't be 1377 * HCLGEVF_MAC_ACTIVE 1378 */ 1379 case HCLGEVF_MAC_ACTIVE: 1380 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1381 mac_node->state = HCLGEVF_MAC_ACTIVE; 1382 break; 1383 } 1384 } 1385 1386 static int hclgevf_update_mac_list(struct hnae3_handle *handle, 1387 enum HCLGEVF_MAC_NODE_STATE state, 1388 enum HCLGEVF_MAC_ADDR_TYPE mac_type, 1389 const unsigned char *addr) 1390 { 1391 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1392 struct hclgevf_mac_addr_node *mac_node; 1393 struct list_head *list; 1394 1395 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1396 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1397 1398 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1399 1400 /* if the mac addr is already in the mac list, no need to add a new 1401 * one into it, just check the mac addr state, convert it to a new 1402 * new state, or just remove it, or do nothing. 1403 */ 1404 mac_node = hclgevf_find_mac_node(list, addr); 1405 if (mac_node) { 1406 hclgevf_update_mac_node(mac_node, state); 1407 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1408 return 0; 1409 } 1410 /* if this address is never added, unnecessary to delete */ 1411 if (state == HCLGEVF_MAC_TO_DEL) { 1412 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1413 return -ENOENT; 1414 } 1415 1416 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 1417 if (!mac_node) { 1418 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1419 return -ENOMEM; 1420 } 1421 1422 mac_node->state = state; 1423 ether_addr_copy(mac_node->mac_addr, addr); 1424 list_add_tail(&mac_node->node, list); 1425 1426 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1427 return 0; 1428 } 1429 1430 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1431 const unsigned char *addr) 1432 { 1433 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1434 HCLGEVF_MAC_ADDR_UC, addr); 1435 } 1436 1437 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1438 const unsigned char *addr) 1439 { 1440 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1441 HCLGEVF_MAC_ADDR_UC, addr); 1442 } 1443 1444 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1445 const unsigned char *addr) 1446 { 1447 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1448 HCLGEVF_MAC_ADDR_MC, addr); 1449 } 1450 1451 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1452 const unsigned char *addr) 1453 { 1454 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1455 HCLGEVF_MAC_ADDR_MC, addr); 1456 } 1457 1458 static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, 1459 struct hclgevf_mac_addr_node *mac_node, 1460 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1461 { 1462 struct hclge_vf_to_pf_msg send_msg; 1463 u8 code, subcode; 1464 1465 if (mac_type == HCLGEVF_MAC_ADDR_UC) { 1466 code = HCLGE_MBX_SET_UNICAST; 1467 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1468 subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; 1469 else 1470 subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; 1471 } else { 1472 code = HCLGE_MBX_SET_MULTICAST; 1473 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1474 subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; 1475 else 1476 subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; 1477 } 1478 1479 hclgevf_build_send_msg(&send_msg, code, subcode); 1480 ether_addr_copy(send_msg.data, mac_node->mac_addr); 1481 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1482 } 1483 1484 static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, 1485 struct list_head *list, 1486 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1487 { 1488 struct hclgevf_mac_addr_node *mac_node, *tmp; 1489 int ret; 1490 1491 list_for_each_entry_safe(mac_node, tmp, list, node) { 1492 ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); 1493 if (ret) { 1494 dev_err(&hdev->pdev->dev, 1495 "failed to configure mac %pM, state = %d, ret = %d\n", 1496 mac_node->mac_addr, mac_node->state, ret); 1497 return; 1498 } 1499 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1500 mac_node->state = HCLGEVF_MAC_ACTIVE; 1501 } else { 1502 list_del(&mac_node->node); 1503 kfree(mac_node); 1504 } 1505 } 1506 } 1507 1508 static void hclgevf_sync_from_add_list(struct list_head *add_list, 1509 struct list_head *mac_list) 1510 { 1511 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1512 1513 list_for_each_entry_safe(mac_node, tmp, add_list, node) { 1514 /* if the mac address from tmp_add_list is not in the 1515 * uc/mc_mac_list, it means have received a TO_DEL request 1516 * during the time window of sending mac config request to PF 1517 * If mac_node state is ACTIVE, then change its state to TO_DEL, 1518 * then it will be removed at next time. If is TO_ADD, it means 1519 * send TO_ADD request failed, so just remove the mac node. 1520 */ 1521 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1522 if (new_node) { 1523 hclgevf_update_mac_node(new_node, mac_node->state); 1524 list_del(&mac_node->node); 1525 kfree(mac_node); 1526 } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { 1527 mac_node->state = HCLGEVF_MAC_TO_DEL; 1528 list_del(&mac_node->node); 1529 list_add_tail(&mac_node->node, mac_list); 1530 } else { 1531 list_del(&mac_node->node); 1532 kfree(mac_node); 1533 } 1534 } 1535 } 1536 1537 static void hclgevf_sync_from_del_list(struct list_head *del_list, 1538 struct list_head *mac_list) 1539 { 1540 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1541 1542 list_for_each_entry_safe(mac_node, tmp, del_list, node) { 1543 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1544 if (new_node) { 1545 /* If the mac addr is exist in the mac list, it means 1546 * received a new request TO_ADD during the time window 1547 * of sending mac addr configurrequest to PF, so just 1548 * change the mac state to ACTIVE. 1549 */ 1550 new_node->state = HCLGEVF_MAC_ACTIVE; 1551 list_del(&mac_node->node); 1552 kfree(mac_node); 1553 } else { 1554 list_del(&mac_node->node); 1555 list_add_tail(&mac_node->node, mac_list); 1556 } 1557 } 1558 } 1559 1560 static void hclgevf_clear_list(struct list_head *list) 1561 { 1562 struct hclgevf_mac_addr_node *mac_node, *tmp; 1563 1564 list_for_each_entry_safe(mac_node, tmp, list, node) { 1565 list_del(&mac_node->node); 1566 kfree(mac_node); 1567 } 1568 } 1569 1570 static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, 1571 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1572 { 1573 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1574 struct list_head tmp_add_list, tmp_del_list; 1575 struct list_head *list; 1576 1577 INIT_LIST_HEAD(&tmp_add_list); 1578 INIT_LIST_HEAD(&tmp_del_list); 1579 1580 /* move the mac addr to the tmp_add_list and tmp_del_list, then 1581 * we can add/delete these mac addr outside the spin lock 1582 */ 1583 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1584 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1585 1586 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1587 1588 list_for_each_entry_safe(mac_node, tmp, list, node) { 1589 switch (mac_node->state) { 1590 case HCLGEVF_MAC_TO_DEL: 1591 list_del(&mac_node->node); 1592 list_add_tail(&mac_node->node, &tmp_del_list); 1593 break; 1594 case HCLGEVF_MAC_TO_ADD: 1595 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 1596 if (!new_node) 1597 goto stop_traverse; 1598 1599 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 1600 new_node->state = mac_node->state; 1601 list_add_tail(&new_node->node, &tmp_add_list); 1602 break; 1603 default: 1604 break; 1605 } 1606 } 1607 1608 stop_traverse: 1609 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1610 1611 /* delete first, in order to get max mac table space for adding */ 1612 hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); 1613 hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); 1614 1615 /* if some mac addresses were added/deleted fail, move back to the 1616 * mac_list, and retry at next time. 1617 */ 1618 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1619 1620 hclgevf_sync_from_del_list(&tmp_del_list, list); 1621 hclgevf_sync_from_add_list(&tmp_add_list, list); 1622 1623 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1624 } 1625 1626 static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) 1627 { 1628 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); 1629 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); 1630 } 1631 1632 static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) 1633 { 1634 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1635 1636 hclgevf_clear_list(&hdev->mac_table.uc_mac_list); 1637 hclgevf_clear_list(&hdev->mac_table.mc_mac_list); 1638 1639 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1640 } 1641 1642 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1643 __be16 proto, u16 vlan_id, 1644 bool is_kill) 1645 { 1646 #define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 1647 #define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 1648 #define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 1649 1650 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1651 struct hclge_vf_to_pf_msg send_msg; 1652 int ret; 1653 1654 if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1655 return -EINVAL; 1656 1657 if (proto != htons(ETH_P_8021Q)) 1658 return -EPROTONOSUPPORT; 1659 1660 /* When device is resetting or reset failed, firmware is unable to 1661 * handle mailbox. Just record the vlan id, and remove it after 1662 * reset finished. 1663 */ 1664 if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 1665 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { 1666 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1667 return -EBUSY; 1668 } 1669 1670 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1671 HCLGE_MBX_VLAN_FILTER); 1672 send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; 1673 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, 1674 sizeof(vlan_id)); 1675 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, 1676 sizeof(proto)); 1677 /* when remove hw vlan filter failed, record the vlan id, 1678 * and try to remove it from hw later, to be consistence 1679 * with stack. 1680 */ 1681 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1682 if (is_kill && ret) 1683 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1684 1685 return ret; 1686 } 1687 1688 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1689 { 1690 #define HCLGEVF_MAX_SYNC_COUNT 60 1691 struct hnae3_handle *handle = &hdev->nic; 1692 int ret, sync_cnt = 0; 1693 u16 vlan_id; 1694 1695 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1696 while (vlan_id != VLAN_N_VID) { 1697 ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1698 vlan_id, true); 1699 if (ret) 1700 return; 1701 1702 clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1703 sync_cnt++; 1704 if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1705 return; 1706 1707 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1708 } 1709 } 1710 1711 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1712 { 1713 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1714 struct hclge_vf_to_pf_msg send_msg; 1715 1716 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1717 HCLGE_MBX_VLAN_RX_OFF_CFG); 1718 send_msg.data[0] = enable ? 1 : 0; 1719 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1720 } 1721 1722 static int hclgevf_reset_tqp(struct hnae3_handle *handle) 1723 { 1724 #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U 1725 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1726 struct hclge_vf_to_pf_msg send_msg; 1727 u8 return_status = 0; 1728 int ret; 1729 u16 i; 1730 1731 /* disable vf queue before send queue reset msg to PF */ 1732 ret = hclgevf_tqp_enable(handle, false); 1733 if (ret) { 1734 dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n", 1735 ret); 1736 return ret; 1737 } 1738 1739 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1740 1741 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status, 1742 sizeof(return_status)); 1743 if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE) 1744 return ret; 1745 1746 for (i = 1; i < handle->kinfo.num_tqps; i++) { 1747 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1748 memcpy(send_msg.data, &i, sizeof(i)); 1749 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1750 if (ret) 1751 return ret; 1752 } 1753 1754 return 0; 1755 } 1756 1757 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1758 { 1759 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1760 struct hclge_vf_to_pf_msg send_msg; 1761 1762 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); 1763 memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); 1764 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1765 } 1766 1767 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1768 enum hnae3_reset_notify_type type) 1769 { 1770 struct hnae3_client *client = hdev->nic_client; 1771 struct hnae3_handle *handle = &hdev->nic; 1772 int ret; 1773 1774 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 1775 !client) 1776 return 0; 1777 1778 if (!client->ops->reset_notify) 1779 return -EOPNOTSUPP; 1780 1781 ret = client->ops->reset_notify(handle, type); 1782 if (ret) 1783 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1784 type, ret); 1785 1786 return ret; 1787 } 1788 1789 static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, 1790 enum hnae3_reset_notify_type type) 1791 { 1792 struct hnae3_client *client = hdev->roce_client; 1793 struct hnae3_handle *handle = &hdev->roce; 1794 int ret; 1795 1796 if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) 1797 return 0; 1798 1799 if (!client->ops->reset_notify) 1800 return -EOPNOTSUPP; 1801 1802 ret = client->ops->reset_notify(handle, type); 1803 if (ret) 1804 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 1805 type, ret); 1806 return ret; 1807 } 1808 1809 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1810 { 1811 #define HCLGEVF_RESET_WAIT_US 20000 1812 #define HCLGEVF_RESET_WAIT_CNT 2000 1813 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1814 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1815 1816 u32 val; 1817 int ret; 1818 1819 if (hdev->reset_type == HNAE3_VF_RESET) 1820 ret = readl_poll_timeout(hdev->hw.io_base + 1821 HCLGEVF_VF_RST_ING, val, 1822 !(val & HCLGEVF_VF_RST_ING_BIT), 1823 HCLGEVF_RESET_WAIT_US, 1824 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1825 else 1826 ret = readl_poll_timeout(hdev->hw.io_base + 1827 HCLGEVF_RST_ING, val, 1828 !(val & HCLGEVF_RST_ING_BITS), 1829 HCLGEVF_RESET_WAIT_US, 1830 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1831 1832 /* hardware completion status should be available by this time */ 1833 if (ret) { 1834 dev_err(&hdev->pdev->dev, 1835 "couldn't get reset done status from h/w, timeout!\n"); 1836 return ret; 1837 } 1838 1839 /* we will wait a bit more to let reset of the stack to complete. This 1840 * might happen in case reset assertion was made by PF. Yes, this also 1841 * means we might end up waiting bit more even for VF reset. 1842 */ 1843 msleep(5000); 1844 1845 return 0; 1846 } 1847 1848 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 1849 { 1850 u32 reg_val; 1851 1852 reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); 1853 if (enable) 1854 reg_val |= HCLGEVF_NIC_SW_RST_RDY; 1855 else 1856 reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 1857 1858 hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 1859 reg_val); 1860 } 1861 1862 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1863 { 1864 int ret; 1865 1866 /* uninitialize the nic client */ 1867 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1868 if (ret) 1869 return ret; 1870 1871 /* re-initialize the hclge device */ 1872 ret = hclgevf_reset_hdev(hdev); 1873 if (ret) { 1874 dev_err(&hdev->pdev->dev, 1875 "hclge device re-init failed, VF is disabled!\n"); 1876 return ret; 1877 } 1878 1879 /* bring up the nic client again */ 1880 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1881 if (ret) 1882 return ret; 1883 1884 /* clear handshake status with IMP */ 1885 hclgevf_reset_handshake(hdev, false); 1886 1887 /* bring up the nic to enable TX/RX again */ 1888 return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1889 } 1890 1891 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1892 { 1893 #define HCLGEVF_RESET_SYNC_TIME 100 1894 1895 if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1896 struct hclge_vf_to_pf_msg send_msg; 1897 int ret; 1898 1899 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); 1900 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1901 if (ret) { 1902 dev_err(&hdev->pdev->dev, 1903 "failed to assert VF reset, ret = %d\n", ret); 1904 return ret; 1905 } 1906 hdev->rst_stats.vf_func_rst_cnt++; 1907 } 1908 1909 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1910 /* inform hardware that preparatory work is done */ 1911 msleep(HCLGEVF_RESET_SYNC_TIME); 1912 hclgevf_reset_handshake(hdev, true); 1913 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", 1914 hdev->reset_type); 1915 1916 return 0; 1917 } 1918 1919 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 1920 { 1921 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 1922 hdev->rst_stats.vf_func_rst_cnt); 1923 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1924 hdev->rst_stats.flr_rst_cnt); 1925 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 1926 hdev->rst_stats.vf_rst_cnt); 1927 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1928 hdev->rst_stats.rst_done_cnt); 1929 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1930 hdev->rst_stats.hw_rst_done_cnt); 1931 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1932 hdev->rst_stats.rst_cnt); 1933 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1934 hdev->rst_stats.rst_fail_cnt); 1935 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1936 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 1937 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 1938 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG)); 1939 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1940 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); 1941 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1942 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 1943 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1944 } 1945 1946 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1947 { 1948 /* recover handshake status with IMP when reset fail */ 1949 hclgevf_reset_handshake(hdev, true); 1950 hdev->rst_stats.rst_fail_cnt++; 1951 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1952 hdev->rst_stats.rst_fail_cnt); 1953 1954 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1955 set_bit(hdev->reset_type, &hdev->reset_pending); 1956 1957 if (hclgevf_is_reset_pending(hdev)) { 1958 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1959 hclgevf_reset_task_schedule(hdev); 1960 } else { 1961 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1962 hclgevf_dump_rst_info(hdev); 1963 } 1964 } 1965 1966 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 1967 { 1968 int ret; 1969 1970 hdev->rst_stats.rst_cnt++; 1971 1972 /* perform reset of the stack & ae device for a client */ 1973 ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 1974 if (ret) 1975 return ret; 1976 1977 rtnl_lock(); 1978 /* bring down the nic to stop any ongoing TX/RX */ 1979 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1980 rtnl_unlock(); 1981 if (ret) 1982 return ret; 1983 1984 return hclgevf_reset_prepare_wait(hdev); 1985 } 1986 1987 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 1988 { 1989 int ret; 1990 1991 hdev->rst_stats.hw_rst_done_cnt++; 1992 ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 1993 if (ret) 1994 return ret; 1995 1996 rtnl_lock(); 1997 /* now, re-initialize the nic client and ae device */ 1998 ret = hclgevf_reset_stack(hdev); 1999 rtnl_unlock(); 2000 if (ret) { 2001 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 2002 return ret; 2003 } 2004 2005 ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 2006 /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1 2007 * times 2008 */ 2009 if (ret && 2010 hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) 2011 return ret; 2012 2013 ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); 2014 if (ret) 2015 return ret; 2016 2017 hdev->last_reset_time = jiffies; 2018 hdev->rst_stats.rst_done_cnt++; 2019 hdev->rst_stats.rst_fail_cnt = 0; 2020 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2021 2022 return 0; 2023 } 2024 2025 static void hclgevf_reset(struct hclgevf_dev *hdev) 2026 { 2027 if (hclgevf_reset_prepare(hdev)) 2028 goto err_reset; 2029 2030 /* check if VF could successfully fetch the hardware reset completion 2031 * status from the hardware 2032 */ 2033 if (hclgevf_reset_wait(hdev)) { 2034 /* can't do much in this situation, will disable VF */ 2035 dev_err(&hdev->pdev->dev, 2036 "failed to fetch H/W reset completion status\n"); 2037 goto err_reset; 2038 } 2039 2040 if (hclgevf_reset_rebuild(hdev)) 2041 goto err_reset; 2042 2043 return; 2044 2045 err_reset: 2046 hclgevf_reset_err_handle(hdev); 2047 } 2048 2049 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 2050 unsigned long *addr) 2051 { 2052 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2053 2054 /* return the highest priority reset level amongst all */ 2055 if (test_bit(HNAE3_VF_RESET, addr)) { 2056 rst_level = HNAE3_VF_RESET; 2057 clear_bit(HNAE3_VF_RESET, addr); 2058 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2059 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2060 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 2061 rst_level = HNAE3_VF_FULL_RESET; 2062 clear_bit(HNAE3_VF_FULL_RESET, addr); 2063 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2064 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 2065 rst_level = HNAE3_VF_PF_FUNC_RESET; 2066 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2067 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2068 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 2069 rst_level = HNAE3_VF_FUNC_RESET; 2070 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2071 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 2072 rst_level = HNAE3_FLR_RESET; 2073 clear_bit(HNAE3_FLR_RESET, addr); 2074 } 2075 2076 return rst_level; 2077 } 2078 2079 static void hclgevf_reset_event(struct pci_dev *pdev, 2080 struct hnae3_handle *handle) 2081 { 2082 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2083 struct hclgevf_dev *hdev = ae_dev->priv; 2084 2085 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 2086 2087 if (hdev->default_reset_request) 2088 hdev->reset_level = 2089 hclgevf_get_reset_level(hdev, 2090 &hdev->default_reset_request); 2091 else 2092 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2093 2094 /* reset of this VF requested */ 2095 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 2096 hclgevf_reset_task_schedule(hdev); 2097 2098 hdev->last_reset_time = jiffies; 2099 } 2100 2101 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 2102 enum hnae3_reset_type rst_type) 2103 { 2104 struct hclgevf_dev *hdev = ae_dev->priv; 2105 2106 set_bit(rst_type, &hdev->default_reset_request); 2107 } 2108 2109 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 2110 { 2111 writel(en ? 1 : 0, vector->addr); 2112 } 2113 2114 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) 2115 { 2116 #define HCLGEVF_FLR_RETRY_WAIT_MS 500 2117 #define HCLGEVF_FLR_RETRY_CNT 5 2118 2119 struct hclgevf_dev *hdev = ae_dev->priv; 2120 int retry_cnt = 0; 2121 int ret; 2122 2123 retry: 2124 down(&hdev->reset_sem); 2125 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2126 hdev->reset_type = HNAE3_FLR_RESET; 2127 ret = hclgevf_reset_prepare(hdev); 2128 if (ret) { 2129 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", 2130 ret); 2131 if (hdev->reset_pending || 2132 retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) { 2133 dev_err(&hdev->pdev->dev, 2134 "reset_pending:0x%lx, retry_cnt:%d\n", 2135 hdev->reset_pending, retry_cnt); 2136 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2137 up(&hdev->reset_sem); 2138 msleep(HCLGEVF_FLR_RETRY_WAIT_MS); 2139 goto retry; 2140 } 2141 } 2142 2143 /* disable misc vector before FLR done */ 2144 hclgevf_enable_vector(&hdev->misc_vector, false); 2145 hdev->rst_stats.flr_rst_cnt++; 2146 } 2147 2148 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) 2149 { 2150 struct hclgevf_dev *hdev = ae_dev->priv; 2151 int ret; 2152 2153 hclgevf_enable_vector(&hdev->misc_vector, true); 2154 2155 ret = hclgevf_reset_rebuild(hdev); 2156 if (ret) 2157 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 2158 ret); 2159 2160 hdev->reset_type = HNAE3_NONE_RESET; 2161 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2162 up(&hdev->reset_sem); 2163 } 2164 2165 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 2166 { 2167 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2168 2169 return hdev->fw_version; 2170 } 2171 2172 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 2173 { 2174 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 2175 2176 vector->vector_irq = pci_irq_vector(hdev->pdev, 2177 HCLGEVF_MISC_VECTOR_NUM); 2178 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 2179 /* vector status always valid for Vector 0 */ 2180 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 2181 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 2182 2183 hdev->num_msi_left -= 1; 2184 hdev->num_msi_used += 1; 2185 } 2186 2187 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 2188 { 2189 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2190 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 2191 &hdev->state)) 2192 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2193 } 2194 2195 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 2196 { 2197 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2198 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 2199 &hdev->state)) 2200 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2201 } 2202 2203 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 2204 unsigned long delay) 2205 { 2206 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2207 !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2208 mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); 2209 } 2210 2211 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) 2212 { 2213 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 2214 2215 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) 2216 return; 2217 2218 down(&hdev->reset_sem); 2219 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2220 2221 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 2222 &hdev->reset_state)) { 2223 /* PF has initmated that it is about to reset the hardware. 2224 * We now have to poll & check if hardware has actually 2225 * completed the reset sequence. On hardware reset completion, 2226 * VF needs to reset the client and ae device. 2227 */ 2228 hdev->reset_attempts = 0; 2229 2230 hdev->last_reset_time = jiffies; 2231 while ((hdev->reset_type = 2232 hclgevf_get_reset_level(hdev, &hdev->reset_pending)) 2233 != HNAE3_NONE_RESET) 2234 hclgevf_reset(hdev); 2235 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 2236 &hdev->reset_state)) { 2237 /* we could be here when either of below happens: 2238 * 1. reset was initiated due to watchdog timeout caused by 2239 * a. IMP was earlier reset and our TX got choked down and 2240 * which resulted in watchdog reacting and inducing VF 2241 * reset. This also means our cmdq would be unreliable. 2242 * b. problem in TX due to other lower layer(example link 2243 * layer not functioning properly etc.) 2244 * 2. VF reset might have been initiated due to some config 2245 * change. 2246 * 2247 * NOTE: Theres no clear way to detect above cases than to react 2248 * to the response of PF for this reset request. PF will ack the 2249 * 1b and 2. cases but we will not get any intimation about 1a 2250 * from PF as cmdq would be in unreliable state i.e. mailbox 2251 * communication between PF and VF would be broken. 2252 * 2253 * if we are never geting into pending state it means either: 2254 * 1. PF is not receiving our request which could be due to IMP 2255 * reset 2256 * 2. PF is screwed 2257 * We cannot do much for 2. but to check first we can try reset 2258 * our PCIe + stack and see if it alleviates the problem. 2259 */ 2260 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 2261 /* prepare for full reset of stack + pcie interface */ 2262 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 2263 2264 /* "defer" schedule the reset task again */ 2265 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2266 } else { 2267 hdev->reset_attempts++; 2268 2269 set_bit(hdev->reset_level, &hdev->reset_pending); 2270 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2271 } 2272 hclgevf_reset_task_schedule(hdev); 2273 } 2274 2275 hdev->reset_type = HNAE3_NONE_RESET; 2276 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2277 up(&hdev->reset_sem); 2278 } 2279 2280 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 2281 { 2282 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2283 return; 2284 2285 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 2286 return; 2287 2288 hclgevf_mbx_async_handler(hdev); 2289 2290 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2291 } 2292 2293 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 2294 { 2295 struct hclge_vf_to_pf_msg send_msg; 2296 int ret; 2297 2298 if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) 2299 return; 2300 2301 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 2302 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2303 if (ret) 2304 dev_err(&hdev->pdev->dev, 2305 "VF sends keep alive cmd failed(=%d)\n", ret); 2306 } 2307 2308 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 2309 { 2310 unsigned long delta = round_jiffies_relative(HZ); 2311 struct hnae3_handle *handle = &hdev->nic; 2312 2313 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2314 return; 2315 2316 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 2317 delta = jiffies - hdev->last_serv_processed; 2318 2319 if (delta < round_jiffies_relative(HZ)) { 2320 delta = round_jiffies_relative(HZ) - delta; 2321 goto out; 2322 } 2323 } 2324 2325 hdev->serv_processed_cnt++; 2326 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 2327 hclgevf_keep_alive(hdev); 2328 2329 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 2330 hdev->last_serv_processed = jiffies; 2331 goto out; 2332 } 2333 2334 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 2335 hclgevf_tqps_update_stats(handle); 2336 2337 /* request the link status from the PF. PF would be able to tell VF 2338 * about such updates in future so we might remove this later 2339 */ 2340 hclgevf_request_link_info(hdev); 2341 2342 hclgevf_update_link_mode(hdev); 2343 2344 hclgevf_sync_vlan_filter(hdev); 2345 2346 hclgevf_sync_mac_table(hdev); 2347 2348 hclgevf_sync_promisc_mode(hdev); 2349 2350 hdev->last_serv_processed = jiffies; 2351 2352 out: 2353 hclgevf_task_schedule(hdev, delta); 2354 } 2355 2356 static void hclgevf_service_task(struct work_struct *work) 2357 { 2358 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 2359 service_task.work); 2360 2361 hclgevf_reset_service_task(hdev); 2362 hclgevf_mailbox_service_task(hdev); 2363 hclgevf_periodic_service_task(hdev); 2364 2365 /* Handle reset and mbx again in case periodical task delays the 2366 * handling by calling hclgevf_task_schedule() in 2367 * hclgevf_periodic_service_task() 2368 */ 2369 hclgevf_reset_service_task(hdev); 2370 hclgevf_mailbox_service_task(hdev); 2371 } 2372 2373 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 2374 { 2375 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 2376 } 2377 2378 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 2379 u32 *clearval) 2380 { 2381 u32 val, cmdq_stat_reg, rst_ing_reg; 2382 2383 /* fetch the events from their corresponding regs */ 2384 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 2385 HCLGEVF_VECTOR0_CMDQ_STATE_REG); 2386 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 2387 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2388 dev_info(&hdev->pdev->dev, 2389 "receive reset interrupt 0x%x!\n", rst_ing_reg); 2390 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 2391 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2392 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 2393 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 2394 hdev->rst_stats.vf_rst_cnt++; 2395 /* set up VF hardware reset status, its PF will clear 2396 * this status when PF has initialized done. 2397 */ 2398 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 2399 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 2400 val | HCLGEVF_VF_RST_ING_BIT); 2401 return HCLGEVF_VECTOR0_EVENT_RST; 2402 } 2403 2404 /* check for vector0 mailbox(=CMDQ RX) event source */ 2405 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 2406 /* for revision 0x21, clearing interrupt is writing bit 0 2407 * to the clear register, writing bit 1 means to keep the 2408 * old value. 2409 * for revision 0x20, the clear register is a read & write 2410 * register, so we should just write 0 to the bit we are 2411 * handling, and keep other bits as cmdq_stat_reg. 2412 */ 2413 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2414 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2415 else 2416 *clearval = cmdq_stat_reg & 2417 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2418 2419 return HCLGEVF_VECTOR0_EVENT_MBX; 2420 } 2421 2422 /* print other vector0 event source */ 2423 dev_info(&hdev->pdev->dev, 2424 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2425 cmdq_stat_reg); 2426 2427 return HCLGEVF_VECTOR0_EVENT_OTHER; 2428 } 2429 2430 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2431 { 2432 enum hclgevf_evt_cause event_cause; 2433 struct hclgevf_dev *hdev = data; 2434 u32 clearval; 2435 2436 hclgevf_enable_vector(&hdev->misc_vector, false); 2437 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2438 2439 switch (event_cause) { 2440 case HCLGEVF_VECTOR0_EVENT_RST: 2441 hclgevf_reset_task_schedule(hdev); 2442 break; 2443 case HCLGEVF_VECTOR0_EVENT_MBX: 2444 hclgevf_mbx_handler(hdev); 2445 break; 2446 default: 2447 break; 2448 } 2449 2450 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 2451 hclgevf_clear_event_cause(hdev, clearval); 2452 hclgevf_enable_vector(&hdev->misc_vector, true); 2453 } 2454 2455 return IRQ_HANDLED; 2456 } 2457 2458 static int hclgevf_configure(struct hclgevf_dev *hdev) 2459 { 2460 int ret; 2461 2462 /* get current port based vlan state from PF */ 2463 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2464 if (ret) 2465 return ret; 2466 2467 /* get queue configuration from PF */ 2468 ret = hclgevf_get_queue_info(hdev); 2469 if (ret) 2470 return ret; 2471 2472 /* get queue depth info from PF */ 2473 ret = hclgevf_get_queue_depth(hdev); 2474 if (ret) 2475 return ret; 2476 2477 ret = hclgevf_get_pf_media_type(hdev); 2478 if (ret) 2479 return ret; 2480 2481 /* get tc configuration from PF */ 2482 return hclgevf_get_tc_info(hdev); 2483 } 2484 2485 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2486 { 2487 struct pci_dev *pdev = ae_dev->pdev; 2488 struct hclgevf_dev *hdev; 2489 2490 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2491 if (!hdev) 2492 return -ENOMEM; 2493 2494 hdev->pdev = pdev; 2495 hdev->ae_dev = ae_dev; 2496 ae_dev->priv = hdev; 2497 2498 return 0; 2499 } 2500 2501 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2502 { 2503 struct hnae3_handle *roce = &hdev->roce; 2504 struct hnae3_handle *nic = &hdev->nic; 2505 2506 roce->rinfo.num_vectors = hdev->num_roce_msix; 2507 2508 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2509 hdev->num_msi_left == 0) 2510 return -EINVAL; 2511 2512 roce->rinfo.base_vector = hdev->roce_base_vector; 2513 2514 roce->rinfo.netdev = nic->kinfo.netdev; 2515 roce->rinfo.roce_io_base = hdev->hw.io_base; 2516 roce->rinfo.roce_mem_base = hdev->hw.mem_base; 2517 2518 roce->pdev = nic->pdev; 2519 roce->ae_algo = nic->ae_algo; 2520 roce->numa_node_mask = nic->numa_node_mask; 2521 2522 return 0; 2523 } 2524 2525 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 2526 { 2527 struct hclgevf_cfg_gro_status_cmd *req; 2528 struct hclgevf_desc desc; 2529 int ret; 2530 2531 if (!hnae3_dev_gro_supported(hdev)) 2532 return 0; 2533 2534 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2535 false); 2536 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2537 2538 req->gro_en = en ? 1 : 0; 2539 2540 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2541 if (ret) 2542 dev_err(&hdev->pdev->dev, 2543 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2544 2545 return ret; 2546 } 2547 2548 static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) 2549 { 2550 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size; 2551 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2552 struct hclgevf_rss_tuple_cfg *tuple_sets; 2553 u32 i; 2554 2555 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 2556 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2557 tuple_sets = &rss_cfg->rss_tuple_sets; 2558 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2559 u8 *rss_ind_tbl; 2560 2561 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2562 2563 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size, 2564 sizeof(*rss_ind_tbl), GFP_KERNEL); 2565 if (!rss_ind_tbl) 2566 return -ENOMEM; 2567 2568 rss_cfg->rss_indirection_tbl = rss_ind_tbl; 2569 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2570 HCLGEVF_RSS_KEY_SIZE); 2571 2572 tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2573 tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2574 tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2575 tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2576 tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2577 tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2578 tuple_sets->ipv6_sctp_en = 2579 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 2580 HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT : 2581 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2582 tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2583 } 2584 2585 /* Initialize RSS indirect table */ 2586 for (i = 0; i < rss_ind_tbl_size; i++) 2587 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2588 2589 return 0; 2590 } 2591 2592 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2593 { 2594 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2595 int ret; 2596 2597 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2598 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 2599 rss_cfg->rss_hash_key); 2600 if (ret) 2601 return ret; 2602 2603 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2604 if (ret) 2605 return ret; 2606 } 2607 2608 ret = hclgevf_set_rss_indir_table(hdev); 2609 if (ret) 2610 return ret; 2611 2612 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2613 } 2614 2615 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2616 { 2617 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2618 false); 2619 } 2620 2621 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2622 { 2623 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2624 2625 unsigned long last = hdev->serv_processed_cnt; 2626 int i = 0; 2627 2628 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2629 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2630 last == hdev->serv_processed_cnt) 2631 usleep_range(1, 1); 2632 } 2633 2634 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2635 { 2636 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2637 2638 if (enable) { 2639 hclgevf_task_schedule(hdev, 0); 2640 } else { 2641 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2642 2643 /* flush memory to make sure DOWN is seen by service task */ 2644 smp_mb__before_atomic(); 2645 hclgevf_flush_link_update(hdev); 2646 } 2647 } 2648 2649 static int hclgevf_ae_start(struct hnae3_handle *handle) 2650 { 2651 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2652 2653 hclgevf_reset_tqp_stats(handle); 2654 2655 hclgevf_request_link_info(hdev); 2656 2657 hclgevf_update_link_mode(hdev); 2658 2659 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2660 2661 return 0; 2662 } 2663 2664 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2665 { 2666 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2667 2668 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2669 2670 if (hdev->reset_type != HNAE3_VF_RESET) 2671 hclgevf_reset_tqp(handle); 2672 2673 hclgevf_reset_tqp_stats(handle); 2674 hclgevf_update_link_status(hdev, 0); 2675 } 2676 2677 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2678 { 2679 #define HCLGEVF_STATE_ALIVE 1 2680 #define HCLGEVF_STATE_NOT_ALIVE 0 2681 2682 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2683 struct hclge_vf_to_pf_msg send_msg; 2684 2685 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2686 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2687 HCLGEVF_STATE_NOT_ALIVE; 2688 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2689 } 2690 2691 static int hclgevf_client_start(struct hnae3_handle *handle) 2692 { 2693 return hclgevf_set_alive(handle, true); 2694 } 2695 2696 static void hclgevf_client_stop(struct hnae3_handle *handle) 2697 { 2698 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2699 int ret; 2700 2701 ret = hclgevf_set_alive(handle, false); 2702 if (ret) 2703 dev_warn(&hdev->pdev->dev, 2704 "%s failed %d\n", __func__, ret); 2705 } 2706 2707 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2708 { 2709 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2710 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2711 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2712 2713 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2714 2715 mutex_init(&hdev->mbx_resp.mbx_mutex); 2716 sema_init(&hdev->reset_sem, 1); 2717 2718 spin_lock_init(&hdev->mac_table.mac_list_lock); 2719 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2720 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2721 2722 /* bring the device down */ 2723 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2724 } 2725 2726 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2727 { 2728 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2729 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2730 2731 if (hdev->service_task.work.func) 2732 cancel_delayed_work_sync(&hdev->service_task); 2733 2734 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2735 } 2736 2737 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2738 { 2739 struct pci_dev *pdev = hdev->pdev; 2740 int vectors; 2741 int i; 2742 2743 if (hnae3_dev_roce_supported(hdev)) 2744 vectors = pci_alloc_irq_vectors(pdev, 2745 hdev->roce_base_msix_offset + 1, 2746 hdev->num_msi, 2747 PCI_IRQ_MSIX); 2748 else 2749 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2750 hdev->num_msi, 2751 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2752 2753 if (vectors < 0) { 2754 dev_err(&pdev->dev, 2755 "failed(%d) to allocate MSI/MSI-X vectors\n", 2756 vectors); 2757 return vectors; 2758 } 2759 if (vectors < hdev->num_msi) 2760 dev_warn(&hdev->pdev->dev, 2761 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2762 hdev->num_msi, vectors); 2763 2764 hdev->num_msi = vectors; 2765 hdev->num_msi_left = vectors; 2766 2767 hdev->base_msi_vector = pdev->irq; 2768 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2769 2770 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2771 sizeof(u16), GFP_KERNEL); 2772 if (!hdev->vector_status) { 2773 pci_free_irq_vectors(pdev); 2774 return -ENOMEM; 2775 } 2776 2777 for (i = 0; i < hdev->num_msi; i++) 2778 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2779 2780 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2781 sizeof(int), GFP_KERNEL); 2782 if (!hdev->vector_irq) { 2783 devm_kfree(&pdev->dev, hdev->vector_status); 2784 pci_free_irq_vectors(pdev); 2785 return -ENOMEM; 2786 } 2787 2788 return 0; 2789 } 2790 2791 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2792 { 2793 struct pci_dev *pdev = hdev->pdev; 2794 2795 devm_kfree(&pdev->dev, hdev->vector_status); 2796 devm_kfree(&pdev->dev, hdev->vector_irq); 2797 pci_free_irq_vectors(pdev); 2798 } 2799 2800 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2801 { 2802 int ret; 2803 2804 hclgevf_get_misc_vector(hdev); 2805 2806 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2807 HCLGEVF_NAME, pci_name(hdev->pdev)); 2808 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2809 0, hdev->misc_vector.name, hdev); 2810 if (ret) { 2811 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2812 hdev->misc_vector.vector_irq); 2813 return ret; 2814 } 2815 2816 hclgevf_clear_event_cause(hdev, 0); 2817 2818 /* enable misc. vector(vector 0) */ 2819 hclgevf_enable_vector(&hdev->misc_vector, true); 2820 2821 return ret; 2822 } 2823 2824 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2825 { 2826 /* disable misc vector(vector 0) */ 2827 hclgevf_enable_vector(&hdev->misc_vector, false); 2828 synchronize_irq(hdev->misc_vector.vector_irq); 2829 free_irq(hdev->misc_vector.vector_irq, hdev); 2830 hclgevf_free_vector(hdev, 0); 2831 } 2832 2833 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2834 { 2835 struct device *dev = &hdev->pdev->dev; 2836 2837 dev_info(dev, "VF info begin:\n"); 2838 2839 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2840 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2841 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2842 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2843 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2844 dev_info(dev, "PF media type of this VF: %u\n", 2845 hdev->hw.mac.media_type); 2846 2847 dev_info(dev, "VF info end.\n"); 2848 } 2849 2850 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2851 struct hnae3_client *client) 2852 { 2853 struct hclgevf_dev *hdev = ae_dev->priv; 2854 int rst_cnt = hdev->rst_stats.rst_cnt; 2855 int ret; 2856 2857 ret = client->ops->init_instance(&hdev->nic); 2858 if (ret) 2859 return ret; 2860 2861 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2862 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2863 rst_cnt != hdev->rst_stats.rst_cnt) { 2864 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2865 2866 client->ops->uninit_instance(&hdev->nic, 0); 2867 return -EBUSY; 2868 } 2869 2870 hnae3_set_client_init_flag(client, ae_dev, 1); 2871 2872 if (netif_msg_drv(&hdev->nic)) 2873 hclgevf_info_show(hdev); 2874 2875 return 0; 2876 } 2877 2878 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2879 struct hnae3_client *client) 2880 { 2881 struct hclgevf_dev *hdev = ae_dev->priv; 2882 int ret; 2883 2884 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2885 !hdev->nic_client) 2886 return 0; 2887 2888 ret = hclgevf_init_roce_base_info(hdev); 2889 if (ret) 2890 return ret; 2891 2892 ret = client->ops->init_instance(&hdev->roce); 2893 if (ret) 2894 return ret; 2895 2896 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2897 hnae3_set_client_init_flag(client, ae_dev, 1); 2898 2899 return 0; 2900 } 2901 2902 static int hclgevf_init_client_instance(struct hnae3_client *client, 2903 struct hnae3_ae_dev *ae_dev) 2904 { 2905 struct hclgevf_dev *hdev = ae_dev->priv; 2906 int ret; 2907 2908 switch (client->type) { 2909 case HNAE3_CLIENT_KNIC: 2910 hdev->nic_client = client; 2911 hdev->nic.client = client; 2912 2913 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2914 if (ret) 2915 goto clear_nic; 2916 2917 ret = hclgevf_init_roce_client_instance(ae_dev, 2918 hdev->roce_client); 2919 if (ret) 2920 goto clear_roce; 2921 2922 break; 2923 case HNAE3_CLIENT_ROCE: 2924 if (hnae3_dev_roce_supported(hdev)) { 2925 hdev->roce_client = client; 2926 hdev->roce.client = client; 2927 } 2928 2929 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2930 if (ret) 2931 goto clear_roce; 2932 2933 break; 2934 default: 2935 return -EINVAL; 2936 } 2937 2938 return 0; 2939 2940 clear_nic: 2941 hdev->nic_client = NULL; 2942 hdev->nic.client = NULL; 2943 return ret; 2944 clear_roce: 2945 hdev->roce_client = NULL; 2946 hdev->roce.client = NULL; 2947 return ret; 2948 } 2949 2950 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2951 struct hnae3_ae_dev *ae_dev) 2952 { 2953 struct hclgevf_dev *hdev = ae_dev->priv; 2954 2955 /* un-init roce, if it exists */ 2956 if (hdev->roce_client) { 2957 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2958 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2959 hdev->roce_client = NULL; 2960 hdev->roce.client = NULL; 2961 } 2962 2963 /* un-init nic/unic, if this was not called by roce client */ 2964 if (client->ops->uninit_instance && hdev->nic_client && 2965 client->type != HNAE3_CLIENT_ROCE) { 2966 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2967 2968 client->ops->uninit_instance(&hdev->nic, 0); 2969 hdev->nic_client = NULL; 2970 hdev->nic.client = NULL; 2971 } 2972 } 2973 2974 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) 2975 { 2976 #define HCLGEVF_MEM_BAR 4 2977 2978 struct pci_dev *pdev = hdev->pdev; 2979 struct hclgevf_hw *hw = &hdev->hw; 2980 2981 /* for device does not have device memory, return directly */ 2982 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) 2983 return 0; 2984 2985 hw->mem_base = devm_ioremap_wc(&pdev->dev, 2986 pci_resource_start(pdev, 2987 HCLGEVF_MEM_BAR), 2988 pci_resource_len(pdev, HCLGEVF_MEM_BAR)); 2989 if (!hw->mem_base) { 2990 dev_err(&pdev->dev, "failed to map device memory\n"); 2991 return -EFAULT; 2992 } 2993 2994 return 0; 2995 } 2996 2997 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2998 { 2999 struct pci_dev *pdev = hdev->pdev; 3000 struct hclgevf_hw *hw; 3001 int ret; 3002 3003 ret = pci_enable_device(pdev); 3004 if (ret) { 3005 dev_err(&pdev->dev, "failed to enable PCI device\n"); 3006 return ret; 3007 } 3008 3009 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3010 if (ret) { 3011 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 3012 goto err_disable_device; 3013 } 3014 3015 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 3016 if (ret) { 3017 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 3018 goto err_disable_device; 3019 } 3020 3021 pci_set_master(pdev); 3022 hw = &hdev->hw; 3023 hw->hdev = hdev; 3024 hw->io_base = pci_iomap(pdev, 2, 0); 3025 if (!hw->io_base) { 3026 dev_err(&pdev->dev, "can't map configuration register space\n"); 3027 ret = -ENOMEM; 3028 goto err_clr_master; 3029 } 3030 3031 ret = hclgevf_dev_mem_map(hdev); 3032 if (ret) 3033 goto err_unmap_io_base; 3034 3035 return 0; 3036 3037 err_unmap_io_base: 3038 pci_iounmap(pdev, hdev->hw.io_base); 3039 err_clr_master: 3040 pci_clear_master(pdev); 3041 pci_release_regions(pdev); 3042 err_disable_device: 3043 pci_disable_device(pdev); 3044 3045 return ret; 3046 } 3047 3048 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 3049 { 3050 struct pci_dev *pdev = hdev->pdev; 3051 3052 if (hdev->hw.mem_base) 3053 devm_iounmap(&pdev->dev, hdev->hw.mem_base); 3054 3055 pci_iounmap(pdev, hdev->hw.io_base); 3056 pci_clear_master(pdev); 3057 pci_release_regions(pdev); 3058 pci_disable_device(pdev); 3059 } 3060 3061 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 3062 { 3063 struct hclgevf_query_res_cmd *req; 3064 struct hclgevf_desc desc; 3065 int ret; 3066 3067 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 3068 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 3069 if (ret) { 3070 dev_err(&hdev->pdev->dev, 3071 "query vf resource failed, ret = %d.\n", ret); 3072 return ret; 3073 } 3074 3075 req = (struct hclgevf_query_res_cmd *)desc.data; 3076 3077 if (hnae3_dev_roce_supported(hdev)) { 3078 hdev->roce_base_msix_offset = 3079 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 3080 HCLGEVF_MSIX_OFT_ROCEE_M, 3081 HCLGEVF_MSIX_OFT_ROCEE_S); 3082 hdev->num_roce_msix = 3083 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3084 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3085 3086 /* nic's msix numbers is always equals to the roce's. */ 3087 hdev->num_nic_msix = hdev->num_roce_msix; 3088 3089 /* VF should have NIC vectors and Roce vectors, NIC vectors 3090 * are queued before Roce vectors. The offset is fixed to 64. 3091 */ 3092 hdev->num_msi = hdev->num_roce_msix + 3093 hdev->roce_base_msix_offset; 3094 } else { 3095 hdev->num_msi = 3096 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3097 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3098 3099 hdev->num_nic_msix = hdev->num_msi; 3100 } 3101 3102 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 3103 dev_err(&hdev->pdev->dev, 3104 "Just %u msi resources, not enough for vf(min:2).\n", 3105 hdev->num_nic_msix); 3106 return -EINVAL; 3107 } 3108 3109 return 0; 3110 } 3111 3112 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 3113 { 3114 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 3115 3116 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3117 3118 ae_dev->dev_specs.max_non_tso_bd_num = 3119 HCLGEVF_MAX_NON_TSO_BD_NUM; 3120 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3121 ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3122 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3123 ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3124 } 3125 3126 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 3127 struct hclgevf_desc *desc) 3128 { 3129 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3130 struct hclgevf_dev_specs_0_cmd *req0; 3131 struct hclgevf_dev_specs_1_cmd *req1; 3132 3133 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 3134 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 3135 3136 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 3137 ae_dev->dev_specs.rss_ind_tbl_size = 3138 le16_to_cpu(req0->rss_ind_tbl_size); 3139 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 3140 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 3141 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 3142 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 3143 } 3144 3145 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 3146 { 3147 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 3148 3149 if (!dev_specs->max_non_tso_bd_num) 3150 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 3151 if (!dev_specs->rss_ind_tbl_size) 3152 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3153 if (!dev_specs->rss_key_size) 3154 dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3155 if (!dev_specs->max_int_gl) 3156 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3157 if (!dev_specs->max_frm_size) 3158 dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3159 } 3160 3161 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 3162 { 3163 struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 3164 int ret; 3165 int i; 3166 3167 /* set default specifications as devices lower than version V3 do not 3168 * support querying specifications from firmware. 3169 */ 3170 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 3171 hclgevf_set_default_dev_specs(hdev); 3172 return 0; 3173 } 3174 3175 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3176 hclgevf_cmd_setup_basic_desc(&desc[i], 3177 HCLGEVF_OPC_QUERY_DEV_SPECS, true); 3178 desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT); 3179 } 3180 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, 3181 true); 3182 3183 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 3184 if (ret) 3185 return ret; 3186 3187 hclgevf_parse_dev_specs(hdev, desc); 3188 hclgevf_check_dev_specs(hdev); 3189 3190 return 0; 3191 } 3192 3193 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 3194 { 3195 struct pci_dev *pdev = hdev->pdev; 3196 int ret = 0; 3197 3198 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 3199 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3200 hclgevf_misc_irq_uninit(hdev); 3201 hclgevf_uninit_msi(hdev); 3202 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3203 } 3204 3205 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3206 pci_set_master(pdev); 3207 ret = hclgevf_init_msi(hdev); 3208 if (ret) { 3209 dev_err(&pdev->dev, 3210 "failed(%d) to init MSI/MSI-X\n", ret); 3211 return ret; 3212 } 3213 3214 ret = hclgevf_misc_irq_init(hdev); 3215 if (ret) { 3216 hclgevf_uninit_msi(hdev); 3217 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 3218 ret); 3219 return ret; 3220 } 3221 3222 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3223 } 3224 3225 return ret; 3226 } 3227 3228 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 3229 { 3230 struct hclge_vf_to_pf_msg send_msg; 3231 3232 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 3233 HCLGE_MBX_VPORT_LIST_CLEAR); 3234 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3235 } 3236 3237 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 3238 { 3239 struct pci_dev *pdev = hdev->pdev; 3240 int ret; 3241 3242 ret = hclgevf_pci_reset(hdev); 3243 if (ret) { 3244 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 3245 return ret; 3246 } 3247 3248 ret = hclgevf_cmd_init(hdev); 3249 if (ret) { 3250 dev_err(&pdev->dev, "cmd failed %d\n", ret); 3251 return ret; 3252 } 3253 3254 ret = hclgevf_rss_init_hw(hdev); 3255 if (ret) { 3256 dev_err(&hdev->pdev->dev, 3257 "failed(%d) to initialize RSS\n", ret); 3258 return ret; 3259 } 3260 3261 ret = hclgevf_config_gro(hdev, true); 3262 if (ret) 3263 return ret; 3264 3265 ret = hclgevf_init_vlan_config(hdev); 3266 if (ret) { 3267 dev_err(&hdev->pdev->dev, 3268 "failed(%d) to initialize VLAN config\n", ret); 3269 return ret; 3270 } 3271 3272 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3273 3274 dev_info(&hdev->pdev->dev, "Reset done\n"); 3275 3276 return 0; 3277 } 3278 3279 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 3280 { 3281 struct pci_dev *pdev = hdev->pdev; 3282 int ret; 3283 3284 ret = hclgevf_pci_init(hdev); 3285 if (ret) 3286 return ret; 3287 3288 ret = hclgevf_cmd_queue_init(hdev); 3289 if (ret) 3290 goto err_cmd_queue_init; 3291 3292 ret = hclgevf_cmd_init(hdev); 3293 if (ret) 3294 goto err_cmd_init; 3295 3296 /* Get vf resource */ 3297 ret = hclgevf_query_vf_resource(hdev); 3298 if (ret) 3299 goto err_cmd_init; 3300 3301 ret = hclgevf_query_dev_specs(hdev); 3302 if (ret) { 3303 dev_err(&pdev->dev, 3304 "failed to query dev specifications, ret = %d\n", ret); 3305 goto err_cmd_init; 3306 } 3307 3308 ret = hclgevf_init_msi(hdev); 3309 if (ret) { 3310 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 3311 goto err_cmd_init; 3312 } 3313 3314 hclgevf_state_init(hdev); 3315 hdev->reset_level = HNAE3_VF_FUNC_RESET; 3316 hdev->reset_type = HNAE3_NONE_RESET; 3317 3318 ret = hclgevf_misc_irq_init(hdev); 3319 if (ret) 3320 goto err_misc_irq_init; 3321 3322 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3323 3324 ret = hclgevf_configure(hdev); 3325 if (ret) { 3326 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 3327 goto err_config; 3328 } 3329 3330 ret = hclgevf_alloc_tqps(hdev); 3331 if (ret) { 3332 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 3333 goto err_config; 3334 } 3335 3336 ret = hclgevf_set_handle_info(hdev); 3337 if (ret) 3338 goto err_config; 3339 3340 ret = hclgevf_config_gro(hdev, true); 3341 if (ret) 3342 goto err_config; 3343 3344 /* Initialize RSS for this VF */ 3345 ret = hclgevf_rss_init_cfg(hdev); 3346 if (ret) { 3347 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 3348 goto err_config; 3349 } 3350 3351 ret = hclgevf_rss_init_hw(hdev); 3352 if (ret) { 3353 dev_err(&hdev->pdev->dev, 3354 "failed(%d) to initialize RSS\n", ret); 3355 goto err_config; 3356 } 3357 3358 /* ensure vf tbl list as empty before init*/ 3359 ret = hclgevf_clear_vport_list(hdev); 3360 if (ret) { 3361 dev_err(&pdev->dev, 3362 "failed to clear tbl list configuration, ret = %d.\n", 3363 ret); 3364 goto err_config; 3365 } 3366 3367 ret = hclgevf_init_vlan_config(hdev); 3368 if (ret) { 3369 dev_err(&hdev->pdev->dev, 3370 "failed(%d) to initialize VLAN config\n", ret); 3371 goto err_config; 3372 } 3373 3374 hdev->last_reset_time = jiffies; 3375 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 3376 HCLGEVF_DRIVER_NAME); 3377 3378 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 3379 3380 return 0; 3381 3382 err_config: 3383 hclgevf_misc_irq_uninit(hdev); 3384 err_misc_irq_init: 3385 hclgevf_state_uninit(hdev); 3386 hclgevf_uninit_msi(hdev); 3387 err_cmd_init: 3388 hclgevf_cmd_uninit(hdev); 3389 err_cmd_queue_init: 3390 hclgevf_pci_uninit(hdev); 3391 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3392 return ret; 3393 } 3394 3395 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 3396 { 3397 struct hclge_vf_to_pf_msg send_msg; 3398 3399 hclgevf_state_uninit(hdev); 3400 3401 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3402 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3403 3404 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3405 hclgevf_misc_irq_uninit(hdev); 3406 hclgevf_uninit_msi(hdev); 3407 } 3408 3409 hclgevf_cmd_uninit(hdev); 3410 hclgevf_pci_uninit(hdev); 3411 hclgevf_uninit_mac_list(hdev); 3412 } 3413 3414 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 3415 { 3416 struct pci_dev *pdev = ae_dev->pdev; 3417 int ret; 3418 3419 ret = hclgevf_alloc_hdev(ae_dev); 3420 if (ret) { 3421 dev_err(&pdev->dev, "hclge device allocation failed\n"); 3422 return ret; 3423 } 3424 3425 ret = hclgevf_init_hdev(ae_dev->priv); 3426 if (ret) { 3427 dev_err(&pdev->dev, "hclge device initialization failed\n"); 3428 return ret; 3429 } 3430 3431 return 0; 3432 } 3433 3434 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3435 { 3436 struct hclgevf_dev *hdev = ae_dev->priv; 3437 3438 hclgevf_uninit_hdev(hdev); 3439 ae_dev->priv = NULL; 3440 } 3441 3442 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3443 { 3444 struct hnae3_handle *nic = &hdev->nic; 3445 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3446 3447 return min_t(u32, hdev->rss_size_max, 3448 hdev->num_tqps / kinfo->tc_info.num_tc); 3449 } 3450 3451 /** 3452 * hclgevf_get_channels - Get the current channels enabled and max supported. 3453 * @handle: hardware information for network interface 3454 * @ch: ethtool channels structure 3455 * 3456 * We don't support separate tx and rx queues as channels. The other count 3457 * represents how many queues are being used for control. max_combined counts 3458 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3459 * q_vectors since we support a lot more queue pairs than q_vectors. 3460 **/ 3461 static void hclgevf_get_channels(struct hnae3_handle *handle, 3462 struct ethtool_channels *ch) 3463 { 3464 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3465 3466 ch->max_combined = hclgevf_get_max_channels(hdev); 3467 ch->other_count = 0; 3468 ch->max_other = 0; 3469 ch->combined_count = handle->kinfo.rss_size; 3470 } 3471 3472 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3473 u16 *alloc_tqps, u16 *max_rss_size) 3474 { 3475 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3476 3477 *alloc_tqps = hdev->num_tqps; 3478 *max_rss_size = hdev->rss_size_max; 3479 } 3480 3481 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3482 u32 new_tqps_num) 3483 { 3484 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3485 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3486 u16 max_rss_size; 3487 3488 kinfo->req_rss_size = new_tqps_num; 3489 3490 max_rss_size = min_t(u16, hdev->rss_size_max, 3491 hdev->num_tqps / kinfo->tc_info.num_tc); 3492 3493 /* Use the user's configuration when it is not larger than 3494 * max_rss_size, otherwise, use the maximum specification value. 3495 */ 3496 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 3497 kinfo->req_rss_size <= max_rss_size) 3498 kinfo->rss_size = kinfo->req_rss_size; 3499 else if (kinfo->rss_size > max_rss_size || 3500 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3501 kinfo->rss_size = max_rss_size; 3502 3503 kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 3504 } 3505 3506 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 3507 bool rxfh_configured) 3508 { 3509 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3510 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3511 u16 cur_rss_size = kinfo->rss_size; 3512 u16 cur_tqps = kinfo->num_tqps; 3513 u32 *rss_indir; 3514 unsigned int i; 3515 int ret; 3516 3517 hclgevf_update_rss_size(handle, new_tqps_num); 3518 3519 ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 3520 if (ret) 3521 return ret; 3522 3523 /* RSS indirection table has been configuared by user */ 3524 if (rxfh_configured) 3525 goto out; 3526 3527 /* Reinitializes the rss indirect table according to the new RSS size */ 3528 rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 3529 sizeof(u32), GFP_KERNEL); 3530 if (!rss_indir) 3531 return -ENOMEM; 3532 3533 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 3534 rss_indir[i] = i % kinfo->rss_size; 3535 3536 hdev->rss_cfg.rss_size = kinfo->rss_size; 3537 3538 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 3539 if (ret) 3540 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 3541 ret); 3542 3543 kfree(rss_indir); 3544 3545 out: 3546 if (!ret) 3547 dev_info(&hdev->pdev->dev, 3548 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 3549 cur_rss_size, kinfo->rss_size, 3550 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 3551 3552 return ret; 3553 } 3554 3555 static int hclgevf_get_status(struct hnae3_handle *handle) 3556 { 3557 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3558 3559 return hdev->hw.mac.link; 3560 } 3561 3562 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3563 u8 *auto_neg, u32 *speed, 3564 u8 *duplex) 3565 { 3566 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3567 3568 if (speed) 3569 *speed = hdev->hw.mac.speed; 3570 if (duplex) 3571 *duplex = hdev->hw.mac.duplex; 3572 if (auto_neg) 3573 *auto_neg = AUTONEG_DISABLE; 3574 } 3575 3576 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3577 u8 duplex) 3578 { 3579 hdev->hw.mac.speed = speed; 3580 hdev->hw.mac.duplex = duplex; 3581 } 3582 3583 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3584 { 3585 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3586 3587 return hclgevf_config_gro(hdev, enable); 3588 } 3589 3590 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3591 u8 *module_type) 3592 { 3593 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3594 3595 if (media_type) 3596 *media_type = hdev->hw.mac.media_type; 3597 3598 if (module_type) 3599 *module_type = hdev->hw.mac.module_type; 3600 } 3601 3602 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3603 { 3604 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3605 3606 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3607 } 3608 3609 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3610 { 3611 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3612 3613 return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 3614 } 3615 3616 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3617 { 3618 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3619 3620 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3621 } 3622 3623 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3624 { 3625 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3626 3627 return hdev->rst_stats.hw_rst_done_cnt; 3628 } 3629 3630 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3631 unsigned long *supported, 3632 unsigned long *advertising) 3633 { 3634 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3635 3636 *supported = hdev->hw.mac.supported; 3637 *advertising = hdev->hw.mac.advertising; 3638 } 3639 3640 #define MAX_SEPARATE_NUM 4 3641 #define SEPARATOR_VALUE 0xFFFFFFFF 3642 #define REG_NUM_PER_LINE 4 3643 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 3644 3645 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 3646 { 3647 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 3648 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3649 3650 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 3651 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 3652 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 3653 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 3654 3655 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 3656 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 3657 } 3658 3659 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 3660 void *data) 3661 { 3662 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3663 int i, j, reg_um, separator_num; 3664 u32 *reg = data; 3665 3666 *version = hdev->fw_version; 3667 3668 /* fetching per-VF registers values from VF PCIe register space */ 3669 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 3670 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3671 for (i = 0; i < reg_um; i++) 3672 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 3673 for (i = 0; i < separator_num; i++) 3674 *reg++ = SEPARATOR_VALUE; 3675 3676 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 3677 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3678 for (i = 0; i < reg_um; i++) 3679 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 3680 for (i = 0; i < separator_num; i++) 3681 *reg++ = SEPARATOR_VALUE; 3682 3683 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 3684 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3685 for (j = 0; j < hdev->num_tqps; j++) { 3686 for (i = 0; i < reg_um; i++) 3687 *reg++ = hclgevf_read_dev(&hdev->hw, 3688 ring_reg_addr_list[i] + 3689 0x200 * j); 3690 for (i = 0; i < separator_num; i++) 3691 *reg++ = SEPARATOR_VALUE; 3692 } 3693 3694 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 3695 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3696 for (j = 0; j < hdev->num_msi_used - 1; j++) { 3697 for (i = 0; i < reg_um; i++) 3698 *reg++ = hclgevf_read_dev(&hdev->hw, 3699 tqp_intr_reg_addr_list[i] + 3700 4 * j); 3701 for (i = 0; i < separator_num; i++) 3702 *reg++ = SEPARATOR_VALUE; 3703 } 3704 } 3705 3706 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 3707 u8 *port_base_vlan_info, u8 data_size) 3708 { 3709 struct hnae3_handle *nic = &hdev->nic; 3710 struct hclge_vf_to_pf_msg send_msg; 3711 int ret; 3712 3713 rtnl_lock(); 3714 3715 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 3716 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { 3717 dev_warn(&hdev->pdev->dev, 3718 "is resetting when updating port based vlan info\n"); 3719 rtnl_unlock(); 3720 return; 3721 } 3722 3723 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3724 if (ret) { 3725 rtnl_unlock(); 3726 return; 3727 } 3728 3729 /* send msg to PF and wait update port based vlan info */ 3730 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3731 HCLGE_MBX_PORT_BASE_VLAN_CFG); 3732 memcpy(send_msg.data, port_base_vlan_info, data_size); 3733 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3734 if (!ret) { 3735 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3736 nic->port_base_vlan_state = state; 3737 else 3738 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3739 } 3740 3741 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 3742 rtnl_unlock(); 3743 } 3744 3745 static const struct hnae3_ae_ops hclgevf_ops = { 3746 .init_ae_dev = hclgevf_init_ae_dev, 3747 .uninit_ae_dev = hclgevf_uninit_ae_dev, 3748 .flr_prepare = hclgevf_flr_prepare, 3749 .flr_done = hclgevf_flr_done, 3750 .init_client_instance = hclgevf_init_client_instance, 3751 .uninit_client_instance = hclgevf_uninit_client_instance, 3752 .start = hclgevf_ae_start, 3753 .stop = hclgevf_ae_stop, 3754 .client_start = hclgevf_client_start, 3755 .client_stop = hclgevf_client_stop, 3756 .map_ring_to_vector = hclgevf_map_ring_to_vector, 3757 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3758 .get_vector = hclgevf_get_vector, 3759 .put_vector = hclgevf_put_vector, 3760 .reset_queue = hclgevf_reset_tqp, 3761 .get_mac_addr = hclgevf_get_mac_addr, 3762 .set_mac_addr = hclgevf_set_mac_addr, 3763 .add_uc_addr = hclgevf_add_uc_addr, 3764 .rm_uc_addr = hclgevf_rm_uc_addr, 3765 .add_mc_addr = hclgevf_add_mc_addr, 3766 .rm_mc_addr = hclgevf_rm_mc_addr, 3767 .get_stats = hclgevf_get_stats, 3768 .update_stats = hclgevf_update_stats, 3769 .get_strings = hclgevf_get_strings, 3770 .get_sset_count = hclgevf_get_sset_count, 3771 .get_rss_key_size = hclgevf_get_rss_key_size, 3772 .get_rss = hclgevf_get_rss, 3773 .set_rss = hclgevf_set_rss, 3774 .get_rss_tuple = hclgevf_get_rss_tuple, 3775 .set_rss_tuple = hclgevf_set_rss_tuple, 3776 .get_tc_size = hclgevf_get_tc_size, 3777 .get_fw_version = hclgevf_get_fw_version, 3778 .set_vlan_filter = hclgevf_set_vlan_filter, 3779 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3780 .reset_event = hclgevf_reset_event, 3781 .set_default_reset_request = hclgevf_set_def_reset_request, 3782 .set_channels = hclgevf_set_channels, 3783 .get_channels = hclgevf_get_channels, 3784 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3785 .get_regs_len = hclgevf_get_regs_len, 3786 .get_regs = hclgevf_get_regs, 3787 .get_status = hclgevf_get_status, 3788 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3789 .get_media_type = hclgevf_get_media_type, 3790 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 3791 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3792 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 3793 .set_gro_en = hclgevf_gro_en, 3794 .set_mtu = hclgevf_set_mtu, 3795 .get_global_queue_id = hclgevf_get_qid_global, 3796 .set_timer_task = hclgevf_set_timer_task, 3797 .get_link_mode = hclgevf_get_link_mode, 3798 .set_promisc_mode = hclgevf_set_promisc_mode, 3799 .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3800 .get_cmdq_stat = hclgevf_get_cmdq_stat, 3801 }; 3802 3803 static struct hnae3_ae_algo ae_algovf = { 3804 .ops = &hclgevf_ops, 3805 .pdev_id_table = ae_algovf_pci_tbl, 3806 }; 3807 3808 static int hclgevf_init(void) 3809 { 3810 pr_info("%s is initializing\n", HCLGEVF_NAME); 3811 3812 hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); 3813 if (!hclgevf_wq) { 3814 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3815 return -ENOMEM; 3816 } 3817 3818 hnae3_register_ae_algo(&ae_algovf); 3819 3820 return 0; 3821 } 3822 3823 static void hclgevf_exit(void) 3824 { 3825 hnae3_unregister_ae_algo(&ae_algovf); 3826 destroy_workqueue(hclgevf_wq); 3827 } 3828 module_init(hclgevf_init); 3829 module_exit(hclgevf_exit); 3830 3831 MODULE_LICENSE("GPL"); 3832 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3833 MODULE_DESCRIPTION("HCLGEVF Driver"); 3834 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3835