// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_VECTOR0_CMDQ_STATE_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}
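/* Query the per-TQP RX and TX packet counters from firmware, one command
 * pair per queue, and accumulate them into the driver's shadow stats.
 */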
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}
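/* Zero-initialize a VF-to-PF mailbox message and fill in its opcode and
 * sub-opcode; most mailbox requests in this file are built through this
 * helper.
 */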
static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}

static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
	struct hclge_basic_info *basic_info;
	struct hclge_vf_to_pf_msg send_msg;
	unsigned long caps;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"failed to get basic info from pf, ret = %d", status);
		return status;
	}

	basic_info = (struct hclge_basic_info *)resp_msg;

	hdev->hw_tc_map = basic_info->hw_tc_map;
	hdev->mbx_api_version = basic_info->mbx_api_version;
	caps = basic_info->pf_caps;
	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}
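/* Translate a VF-local queue id into the PF's global queue id via the
 * mailbox; on mailbox failure the function falls back to returning 0.
 */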
static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;
	u8 num_tc = 0;

	kinfo = &nic->kinfo;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			num_tc++;

	num_tc = num_tc ? num_tc : 1;
	kinfo->tc_info.num_tc = num_tc;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
	new_tqps = kinfo->rss_size * num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after init the max rss_size and tqps, adjust the default tqp numbers
	 * and rss size with the actual vector numbers
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
				kinfo->rss_size);

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}
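/* Allocate up to vector_num MSI-X vectors for the NIC client. The misc
 * vector is reserved, so the search starts after it; the return value is
 * the number of vectors actually handed out.
 */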
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclgevf_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int rss_cfg_tbl_num;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
			  HCLGEVF_RSS_CFG_TBL_SIZE;

	for (i = 0; i < rss_cfg_tbl_num; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index =
			cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}
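/* Program the per-TC RSS mode: tc_size carries the log2 of the rounded-up
 * rss_size (with a separate MSB bit for large sizes) and tc_offset is the
 * first queue of each TC, so TC i covers queues starting at i * rss_size.
 */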
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	unsigned int i;
	int status;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
			      tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
			      0x1);
		hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

/* for revision 0x20, the VF shares the same rss config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
				      struct ethtool_rxnfc *nfc,
				      struct hclgevf_rss_input_tuple_cmd *req)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
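/* ethtool RSS tuple configuration: validate the requested hash fields,
 * send the input-tuple command to hardware, and only cache the new tuple
 * sets in the shadow rss_cfg once the command has succeeded.
 */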
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init rss tuple cmd, ret = %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
					      int flow_type, u8 *tuple_sets)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		*tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
{
	u64 tuple_data = 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		tuple_data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		tuple_data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		tuple_data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		tuple_data |= RXH_IP_SRC;

	return tuple_data;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 tuple_sets;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	nfc->data = 0;

	ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
						 &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclgevf_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
			hnae3_get_field(node->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}
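/* Unmapping is skipped while a reset is being handled: the reset sequence
 * tears down and re-establishes the ring-to-vector mappings itself.
 */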
static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;
	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
					     &handle->priv_flags) ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool en_bc_pmc;

	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
				       u16 stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}

	return 0;
}
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}
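/* Record a requested MAC filter change in the shadow uc/mc list. Entries
 * move between TO_ADD, TO_DEL and ACTIVE here and are only pushed to the
 * PF later by hclgevf_sync_mac_table().
 */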
static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address is never added, unnecessary to delete */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %pM, state = %d, ret = %d\n",
				mac_node->mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}
static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means we have received a TO_DEL request
		 * during the time window of sending the mac config request
		 * to the PF. If mac_node state is ACTIVE, then change its
		 * state to TO_DEL, so it will be removed next time. If it is
		 * TO_ADD, it means the TO_ADD request failed, so just remove
		 * the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr already exists in the mac list, it
			 * means a new TO_ADD request was received during the
			 * time window of sending the mac config request to
			 * the PF, so just change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addr to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addr outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if adding or deleting some mac addresses failed, move them back to
	 * the mac_list and retry next time.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vf_to_pf_msg send_msg;

	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		return -EOPNOTSUPP;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_ENABLE_VLAN_FILTER);
	send_msg.data[0] = enable ? 1 : 0;

	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting or reset failed, firmware is unable to
	 * handle mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing the hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to be consistent
	 * with the stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u8 return_status = 0;
	int ret;
	u16 i;

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(handle, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
			ret);
		return ret;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
				   sizeof(return_status));
	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
		return ret;

	for (i = 1; i < handle->kinfo.num_tqps; i++) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
		memcpy(send_msg.data, &i, sizeof(i));
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
				      enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	struct hnae3_handle *handle = &hdev->roce;
	int ret;

	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);
	return ret;
}
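/* Poll the reset status register until the hardware clears the in-progress
 * bits, checking every 20 ms (20000 us) for up to 2000 polls, i.e. a 40 s
 * total timeout.
 */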
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This might happen in case reset assertion was made by the PF. Yes,
	 * this also means we might end up waiting a bit more even for a VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}

static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		struct hclge_vf_to_pf_msg send_msg;
		int ret;

		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to assert VF reset, ret = %d\n", ret);
			return ret;
		}
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
		 hdev->reset_type);

	return 0;
}

static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
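/* On reset failure, re-arm the handshake and retry the pending reset until
 * HCLGEVF_RESET_MAX_FAIL_CNT attempts have been used up; after that the VF
 * is marked RST_FAIL and the reset state is dumped for diagnosis.
 */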
hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 1968 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1969 } 1970 1971 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1972 { 1973 /* recover handshake status with IMP when reset fail */ 1974 hclgevf_reset_handshake(hdev, true); 1975 hdev->rst_stats.rst_fail_cnt++; 1976 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1977 hdev->rst_stats.rst_fail_cnt); 1978 1979 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1980 set_bit(hdev->reset_type, &hdev->reset_pending); 1981 1982 if (hclgevf_is_reset_pending(hdev)) { 1983 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1984 hclgevf_reset_task_schedule(hdev); 1985 } else { 1986 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1987 hclgevf_dump_rst_info(hdev); 1988 } 1989 } 1990 1991 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 1992 { 1993 int ret; 1994 1995 hdev->rst_stats.rst_cnt++; 1996 1997 /* perform reset of the stack & ae device for a client */ 1998 ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 1999 if (ret) 2000 return ret; 2001 2002 rtnl_lock(); 2003 /* bring down the nic to stop any ongoing TX/RX */ 2004 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 2005 rtnl_unlock(); 2006 if (ret) 2007 return ret; 2008 2009 return hclgevf_reset_prepare_wait(hdev); 2010 } 2011 2012 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 2013 { 2014 int ret; 2015 2016 hdev->rst_stats.hw_rst_done_cnt++; 2017 ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 2018 if (ret) 2019 return ret; 2020 2021 rtnl_lock(); 2022 /* now, re-initialize the nic client and ae device */ 2023 ret = hclgevf_reset_stack(hdev); 2024 rtnl_unlock(); 2025 if (ret) { 2026 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 2027 return ret; 2028 } 2029 2030 ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 2031 /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1 2032 * times 2033 */ 2034 if (ret && 2035 hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) 2036 return ret; 2037 2038 ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); 2039 if (ret) 2040 return ret; 2041 2042 hdev->last_reset_time = jiffies; 2043 hdev->rst_stats.rst_done_cnt++; 2044 hdev->rst_stats.rst_fail_cnt = 0; 2045 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2046 2047 return 0; 2048 } 2049 2050 static void hclgevf_reset(struct hclgevf_dev *hdev) 2051 { 2052 if (hclgevf_reset_prepare(hdev)) 2053 goto err_reset; 2054 2055 /* check if VF could successfully fetch the hardware reset completion 2056 * status from the hardware 2057 */ 2058 if (hclgevf_reset_wait(hdev)) { 2059 /* can't do much in this situation, will disable VF */ 2060 dev_err(&hdev->pdev->dev, 2061 "failed to fetch H/W reset completion status\n"); 2062 goto err_reset; 2063 } 2064 2065 if (hclgevf_reset_rebuild(hdev)) 2066 goto err_reset; 2067 2068 return; 2069 2070 err_reset: 2071 hclgevf_reset_err_handle(hdev); 2072 } 2073 2074 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 2075 unsigned long *addr) 2076 { 2077 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2078 2079 /* return the highest priority reset level amongst all */ 2080 if (test_bit(HNAE3_VF_RESET, addr)) { 2081 rst_level = HNAE3_VF_RESET; 2082 clear_bit(HNAE3_VF_RESET, addr); 2083 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2084 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2085 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 2086 rst_level = HNAE3_VF_FULL_RESET; 2087 
clear_bit(HNAE3_VF_FULL_RESET, addr); 2088 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2089 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 2090 rst_level = HNAE3_VF_PF_FUNC_RESET; 2091 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2092 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2093 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 2094 rst_level = HNAE3_VF_FUNC_RESET; 2095 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2096 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 2097 rst_level = HNAE3_FLR_RESET; 2098 clear_bit(HNAE3_FLR_RESET, addr); 2099 } 2100 2101 return rst_level; 2102 } 2103 2104 static void hclgevf_reset_event(struct pci_dev *pdev, 2105 struct hnae3_handle *handle) 2106 { 2107 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2108 struct hclgevf_dev *hdev = ae_dev->priv; 2109 2110 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 2111 2112 if (hdev->default_reset_request) 2113 hdev->reset_level = 2114 hclgevf_get_reset_level(hdev, 2115 &hdev->default_reset_request); 2116 else 2117 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2118 2119 /* reset of this VF requested */ 2120 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 2121 hclgevf_reset_task_schedule(hdev); 2122 2123 hdev->last_reset_time = jiffies; 2124 } 2125 2126 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 2127 enum hnae3_reset_type rst_type) 2128 { 2129 struct hclgevf_dev *hdev = ae_dev->priv; 2130 2131 set_bit(rst_type, &hdev->default_reset_request); 2132 } 2133 2134 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 2135 { 2136 writel(en ? 1 : 0, vector->addr); 2137 } 2138 2139 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 2140 enum hnae3_reset_type rst_type) 2141 { 2142 #define HCLGEVF_RESET_RETRY_WAIT_MS 500 2143 #define HCLGEVF_RESET_RETRY_CNT 5 2144 2145 struct hclgevf_dev *hdev = ae_dev->priv; 2146 int retry_cnt = 0; 2147 int ret; 2148 2149 retry: 2150 down(&hdev->reset_sem); 2151 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2152 hdev->reset_type = rst_type; 2153 ret = hclgevf_reset_prepare(hdev); 2154 if (ret) { 2155 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n", 2156 ret); 2157 if (hdev->reset_pending || 2158 retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { 2159 dev_err(&hdev->pdev->dev, 2160 "reset_pending:0x%lx, retry_cnt:%d\n", 2161 hdev->reset_pending, retry_cnt); 2162 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2163 up(&hdev->reset_sem); 2164 msleep(HCLGEVF_RESET_RETRY_WAIT_MS); 2165 goto retry; 2166 } 2167 } 2168 2169 /* disable misc vector before reset done */ 2170 hclgevf_enable_vector(&hdev->misc_vector, false); 2171 2172 if (hdev->reset_type == HNAE3_FLR_RESET) 2173 hdev->rst_stats.flr_rst_cnt++; 2174 } 2175 2176 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) 2177 { 2178 struct hclgevf_dev *hdev = ae_dev->priv; 2179 int ret; 2180 2181 hclgevf_enable_vector(&hdev->misc_vector, true); 2182 2183 ret = hclgevf_reset_rebuild(hdev); 2184 if (ret) 2185 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 2186 ret); 2187 2188 hdev->reset_type = HNAE3_NONE_RESET; 2189 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2190 up(&hdev->reset_sem); 2191 } 2192 2193 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 2194 { 2195 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2196 2197 return hdev->fw_version; 2198 } 2199 2200 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 2201 { 2202 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 2203 2204 
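	/* vector 0 is reserved as the misc/control vector; record its IRQ
	 * number and its enable-register address so that
	 * hclgevf_enable_vector() can mask and unmask it independently of
	 * the TQP vectors.
	 */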
	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}

static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect the above cases other
		 * than to react to the PF's response to this reset request.
		 * The PF will ack cases 1b and 2, but we will get no
		 * intimation about 1a from the PF since the cmdq would be in
		 * an unreliable state, i.e. mailbox communication between PF
		 * and VF would be broken.
		 *
		 * If we never get into the pending state it means either:
		 * 1. PF is not receiving our request, which could be due to
		 *    an IMP reset
		 * 2. PF itself is broken
		 * We cannot do much about case 2, but as a first check we can
		 * try resetting our PCIe + stack and see if it alleviates the
		 * problem.
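		 * Hence the handling below: retry the requested reset level
		 * up to HCLGEVF_MAX_RESET_ATTEMPTS_CNT times, then escalate
		 * to a full reset of stack + PCIe.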
2287 */ 2288 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 2289 /* prepare for full reset of stack + pcie interface */ 2290 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 2291 2292 /* "defer" schedule the reset task again */ 2293 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2294 } else { 2295 hdev->reset_attempts++; 2296 2297 set_bit(hdev->reset_level, &hdev->reset_pending); 2298 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2299 } 2300 hclgevf_reset_task_schedule(hdev); 2301 } 2302 2303 hdev->reset_type = HNAE3_NONE_RESET; 2304 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2305 up(&hdev->reset_sem); 2306 } 2307 2308 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 2309 { 2310 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2311 return; 2312 2313 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 2314 return; 2315 2316 hclgevf_mbx_async_handler(hdev); 2317 2318 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2319 } 2320 2321 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 2322 { 2323 struct hclge_vf_to_pf_msg send_msg; 2324 int ret; 2325 2326 if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) 2327 return; 2328 2329 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 2330 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2331 if (ret) 2332 dev_err(&hdev->pdev->dev, 2333 "VF sends keep alive cmd failed(=%d)\n", ret); 2334 } 2335 2336 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 2337 { 2338 unsigned long delta = round_jiffies_relative(HZ); 2339 struct hnae3_handle *handle = &hdev->nic; 2340 2341 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2342 return; 2343 2344 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 2345 delta = jiffies - hdev->last_serv_processed; 2346 2347 if (delta < round_jiffies_relative(HZ)) { 2348 delta = round_jiffies_relative(HZ) - delta; 2349 goto out; 2350 } 2351 } 2352 2353 hdev->serv_processed_cnt++; 2354 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 2355 hclgevf_keep_alive(hdev); 2356 2357 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 2358 hdev->last_serv_processed = jiffies; 2359 goto out; 2360 } 2361 2362 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 2363 hclgevf_tqps_update_stats(handle); 2364 2365 /* VF does not need to request link status when this bit is set, because 2366 * PF will push its link status to VFs when link status changed. 
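	 * Otherwise the VF keeps polling the PF for link status in every
	 * cycle of this service task.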
2367 */ 2368 if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) 2369 hclgevf_request_link_info(hdev); 2370 2371 hclgevf_update_link_mode(hdev); 2372 2373 hclgevf_sync_vlan_filter(hdev); 2374 2375 hclgevf_sync_mac_table(hdev); 2376 2377 hclgevf_sync_promisc_mode(hdev); 2378 2379 hdev->last_serv_processed = jiffies; 2380 2381 out: 2382 hclgevf_task_schedule(hdev, delta); 2383 } 2384 2385 static void hclgevf_service_task(struct work_struct *work) 2386 { 2387 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 2388 service_task.work); 2389 2390 hclgevf_reset_service_task(hdev); 2391 hclgevf_mailbox_service_task(hdev); 2392 hclgevf_periodic_service_task(hdev); 2393 2394 /* Handle reset and mbx again in case periodical task delays the 2395 * handling by calling hclgevf_task_schedule() in 2396 * hclgevf_periodic_service_task() 2397 */ 2398 hclgevf_reset_service_task(hdev); 2399 hclgevf_mailbox_service_task(hdev); 2400 } 2401 2402 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 2403 { 2404 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 2405 } 2406 2407 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 2408 u32 *clearval) 2409 { 2410 u32 val, cmdq_stat_reg, rst_ing_reg; 2411 2412 /* fetch the events from their corresponding regs */ 2413 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 2414 HCLGEVF_VECTOR0_CMDQ_STATE_REG); 2415 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 2416 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2417 dev_info(&hdev->pdev->dev, 2418 "receive reset interrupt 0x%x!\n", rst_ing_reg); 2419 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 2420 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2421 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 2422 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 2423 hdev->rst_stats.vf_rst_cnt++; 2424 /* set up VF hardware reset status, its PF will clear 2425 * this status when PF has initialized done. 2426 */ 2427 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 2428 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 2429 val | HCLGEVF_VF_RST_ING_BIT); 2430 return HCLGEVF_VECTOR0_EVENT_RST; 2431 } 2432 2433 /* check for vector0 mailbox(=CMDQ RX) event source */ 2434 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 2435 /* for revision 0x21, clearing interrupt is writing bit 0 2436 * to the clear register, writing bit 1 means to keep the 2437 * old value. 2438 * for revision 0x20, the clear register is a read & write 2439 * register, so we should just write 0 to the bit we are 2440 * handling, and keep other bits as cmdq_stat_reg. 
2441 */ 2442 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2443 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2444 else 2445 *clearval = cmdq_stat_reg & 2446 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2447 2448 return HCLGEVF_VECTOR0_EVENT_MBX; 2449 } 2450 2451 /* print other vector0 event source */ 2452 dev_info(&hdev->pdev->dev, 2453 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2454 cmdq_stat_reg); 2455 2456 return HCLGEVF_VECTOR0_EVENT_OTHER; 2457 } 2458 2459 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2460 { 2461 enum hclgevf_evt_cause event_cause; 2462 struct hclgevf_dev *hdev = data; 2463 u32 clearval; 2464 2465 hclgevf_enable_vector(&hdev->misc_vector, false); 2466 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2467 2468 switch (event_cause) { 2469 case HCLGEVF_VECTOR0_EVENT_RST: 2470 hclgevf_reset_task_schedule(hdev); 2471 break; 2472 case HCLGEVF_VECTOR0_EVENT_MBX: 2473 hclgevf_mbx_handler(hdev); 2474 break; 2475 default: 2476 break; 2477 } 2478 2479 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 2480 hclgevf_clear_event_cause(hdev, clearval); 2481 hclgevf_enable_vector(&hdev->misc_vector, true); 2482 } 2483 2484 return IRQ_HANDLED; 2485 } 2486 2487 static int hclgevf_configure(struct hclgevf_dev *hdev) 2488 { 2489 int ret; 2490 2491 ret = hclgevf_get_basic_info(hdev); 2492 if (ret) 2493 return ret; 2494 2495 /* get current port based vlan state from PF */ 2496 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2497 if (ret) 2498 return ret; 2499 2500 /* get queue configuration from PF */ 2501 ret = hclgevf_get_queue_info(hdev); 2502 if (ret) 2503 return ret; 2504 2505 /* get queue depth info from PF */ 2506 ret = hclgevf_get_queue_depth(hdev); 2507 if (ret) 2508 return ret; 2509 2510 return hclgevf_get_pf_media_type(hdev); 2511 } 2512 2513 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2514 { 2515 struct pci_dev *pdev = ae_dev->pdev; 2516 struct hclgevf_dev *hdev; 2517 2518 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2519 if (!hdev) 2520 return -ENOMEM; 2521 2522 hdev->pdev = pdev; 2523 hdev->ae_dev = ae_dev; 2524 ae_dev->priv = hdev; 2525 2526 return 0; 2527 } 2528 2529 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2530 { 2531 struct hnae3_handle *roce = &hdev->roce; 2532 struct hnae3_handle *nic = &hdev->nic; 2533 2534 roce->rinfo.num_vectors = hdev->num_roce_msix; 2535 2536 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2537 hdev->num_msi_left == 0) 2538 return -EINVAL; 2539 2540 roce->rinfo.base_vector = hdev->roce_base_vector; 2541 2542 roce->rinfo.netdev = nic->kinfo.netdev; 2543 roce->rinfo.roce_io_base = hdev->hw.io_base; 2544 roce->rinfo.roce_mem_base = hdev->hw.mem_base; 2545 2546 roce->pdev = nic->pdev; 2547 roce->ae_algo = nic->ae_algo; 2548 roce->numa_node_mask = nic->numa_node_mask; 2549 2550 return 0; 2551 } 2552 2553 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 2554 { 2555 struct hclgevf_cfg_gro_status_cmd *req; 2556 struct hclgevf_desc desc; 2557 int ret; 2558 2559 if (!hnae3_dev_gro_supported(hdev)) 2560 return 0; 2561 2562 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2563 false); 2564 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2565 2566 req->gro_en = en ? 
1 : 0; 2567 2568 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2569 if (ret) 2570 dev_err(&hdev->pdev->dev, 2571 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2572 2573 return ret; 2574 } 2575 2576 static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) 2577 { 2578 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size; 2579 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2580 struct hclgevf_rss_tuple_cfg *tuple_sets; 2581 u32 i; 2582 2583 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 2584 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2585 tuple_sets = &rss_cfg->rss_tuple_sets; 2586 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2587 u8 *rss_ind_tbl; 2588 2589 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2590 2591 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size, 2592 sizeof(*rss_ind_tbl), GFP_KERNEL); 2593 if (!rss_ind_tbl) 2594 return -ENOMEM; 2595 2596 rss_cfg->rss_indirection_tbl = rss_ind_tbl; 2597 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2598 HCLGEVF_RSS_KEY_SIZE); 2599 2600 tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2601 tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2602 tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2603 tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2604 tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2605 tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2606 tuple_sets->ipv6_sctp_en = 2607 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 2608 HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT : 2609 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2610 tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2611 } 2612 2613 /* Initialize RSS indirect table */ 2614 for (i = 0; i < rss_ind_tbl_size; i++) 2615 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2616 2617 return 0; 2618 } 2619 2620 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2621 { 2622 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2623 int ret; 2624 2625 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2626 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 2627 rss_cfg->rss_hash_key); 2628 if (ret) 2629 return ret; 2630 2631 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2632 if (ret) 2633 return ret; 2634 } 2635 2636 ret = hclgevf_set_rss_indir_table(hdev); 2637 if (ret) 2638 return ret; 2639 2640 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2641 } 2642 2643 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2644 { 2645 struct hnae3_handle *nic = &hdev->nic; 2646 int ret; 2647 2648 ret = hclgevf_en_hw_strip_rxvtag(nic, true); 2649 if (ret) { 2650 dev_err(&hdev->pdev->dev, 2651 "failed to enable rx vlan offload, ret = %d\n", ret); 2652 return ret; 2653 } 2654 2655 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2656 false); 2657 } 2658 2659 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2660 { 2661 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2662 2663 unsigned long last = hdev->serv_processed_cnt; 2664 int i = 0; 2665 2666 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2667 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2668 last == hdev->serv_processed_cnt) 2669 usleep_range(1, 1); 2670 } 2671 2672 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2673 { 2674 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2675 2676 if (enable) { 2677 hclgevf_task_schedule(hdev, 0); 2678 } else { 2679 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2680 2681 /* flush 
memory to make sure DOWN is seen by service task */ 2682 smp_mb__before_atomic(); 2683 hclgevf_flush_link_update(hdev); 2684 } 2685 } 2686 2687 static int hclgevf_ae_start(struct hnae3_handle *handle) 2688 { 2689 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2690 2691 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2692 clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); 2693 2694 hclgevf_reset_tqp_stats(handle); 2695 2696 hclgevf_request_link_info(hdev); 2697 2698 hclgevf_update_link_mode(hdev); 2699 2700 return 0; 2701 } 2702 2703 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2704 { 2705 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2706 2707 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2708 2709 if (hdev->reset_type != HNAE3_VF_RESET) 2710 hclgevf_reset_tqp(handle); 2711 2712 hclgevf_reset_tqp_stats(handle); 2713 hclgevf_update_link_status(hdev, 0); 2714 } 2715 2716 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2717 { 2718 #define HCLGEVF_STATE_ALIVE 1 2719 #define HCLGEVF_STATE_NOT_ALIVE 0 2720 2721 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2722 struct hclge_vf_to_pf_msg send_msg; 2723 2724 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2725 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2726 HCLGEVF_STATE_NOT_ALIVE; 2727 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2728 } 2729 2730 static int hclgevf_client_start(struct hnae3_handle *handle) 2731 { 2732 return hclgevf_set_alive(handle, true); 2733 } 2734 2735 static void hclgevf_client_stop(struct hnae3_handle *handle) 2736 { 2737 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2738 int ret; 2739 2740 ret = hclgevf_set_alive(handle, false); 2741 if (ret) 2742 dev_warn(&hdev->pdev->dev, 2743 "%s failed %d\n", __func__, ret); 2744 } 2745 2746 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2747 { 2748 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2749 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2750 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2751 2752 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2753 2754 mutex_init(&hdev->mbx_resp.mbx_mutex); 2755 sema_init(&hdev->reset_sem, 1); 2756 2757 spin_lock_init(&hdev->mac_table.mac_list_lock); 2758 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2759 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2760 2761 /* bring the device down */ 2762 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2763 } 2764 2765 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2766 { 2767 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2768 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2769 2770 if (hdev->service_task.work.func) 2771 cancel_delayed_work_sync(&hdev->service_task); 2772 2773 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2774 } 2775 2776 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2777 { 2778 struct pci_dev *pdev = hdev->pdev; 2779 int vectors; 2780 int i; 2781 2782 if (hnae3_dev_roce_supported(hdev)) 2783 vectors = pci_alloc_irq_vectors(pdev, 2784 hdev->roce_base_msix_offset + 1, 2785 hdev->num_msi, 2786 PCI_IRQ_MSIX); 2787 else 2788 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2789 hdev->num_msi, 2790 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2791 2792 if (vectors < 0) { 2793 dev_err(&pdev->dev, 2794 "failed(%d) to allocate MSI/MSI-X vectors\n", 2795 vectors); 2796 return vectors; 2797 } 2798 if (vectors < hdev->num_msi) 2799 dev_warn(&hdev->pdev->dev, 2800 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2801 hdev->num_msi, 
vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGEVF_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %u\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}

static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					    struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.rst_cnt;
	int ret;

	ret = client->ops->init_instance(&hdev->nic);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.rst_cnt) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		return -EBUSY;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->nic))
		hclgevf_info_show(hdev);

	return 0;
}

static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client
*client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = hclgevf_init_nic_client_instance(ae_dev, client);
		if (ret)
			goto clear_nic;

		ret = hclgevf_init_roce_client_instance(ae_dev,
							hdev->roce_client);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		ret = hclgevf_init_roce_client_instance(ae_dev, client);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by the roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MEM_BAR		4

	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw = &hdev->hw;

	/* for devices that do not have device memory, return directly */
	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
		return 0;

	hw->mem_base = devm_ioremap_wc(&pdev->dev,
				       pci_resource_start(pdev,
							  HCLGEVF_MEM_BAR),
				       pci_resource_len(pdev, HCLGEVF_MEM_BAR));
	if (!hw->mem_base) {
		dev_err(&pdev->dev, "failed to map device memory\n");
		return -EFAULT;
	}

	return 0;
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	ret = hclgevf_dev_mem_map(hdev);
	if (ret)
		goto err_unmap_io_base;

	return 0;

err_unmap_io_base:
	pci_iounmap(pdev, hdev->hw.io_base);
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.mem_base);

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* the NIC's MSI-X vector count always equals the RoCE's. */
		hdev->num_nic_msix = hdev->num_roce_msix;

		/* VF should have NIC vectors and RoCE vectors, and the NIC
		 * vectors are queued before the RoCE vectors. The offset is
		 * fixed to 64.
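		 * Hence the total MSI-X count below is the RoCE vector count
		 * plus that fixed offset.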
3130 */ 3131 hdev->num_msi = hdev->num_roce_msix + 3132 hdev->roce_base_msix_offset; 3133 } else { 3134 hdev->num_msi = 3135 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3136 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3137 3138 hdev->num_nic_msix = hdev->num_msi; 3139 } 3140 3141 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 3142 dev_err(&hdev->pdev->dev, 3143 "Just %u msi resources, not enough for vf(min:2).\n", 3144 hdev->num_nic_msix); 3145 return -EINVAL; 3146 } 3147 3148 return 0; 3149 } 3150 3151 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 3152 { 3153 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 3154 3155 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3156 3157 ae_dev->dev_specs.max_non_tso_bd_num = 3158 HCLGEVF_MAX_NON_TSO_BD_NUM; 3159 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3160 ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3161 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3162 ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3163 } 3164 3165 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 3166 struct hclgevf_desc *desc) 3167 { 3168 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3169 struct hclgevf_dev_specs_0_cmd *req0; 3170 struct hclgevf_dev_specs_1_cmd *req1; 3171 3172 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 3173 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 3174 3175 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 3176 ae_dev->dev_specs.rss_ind_tbl_size = 3177 le16_to_cpu(req0->rss_ind_tbl_size); 3178 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 3179 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 3180 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 3181 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 3182 } 3183 3184 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 3185 { 3186 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 3187 3188 if (!dev_specs->max_non_tso_bd_num) 3189 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 3190 if (!dev_specs->rss_ind_tbl_size) 3191 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3192 if (!dev_specs->rss_key_size) 3193 dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3194 if (!dev_specs->max_int_gl) 3195 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3196 if (!dev_specs->max_frm_size) 3197 dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3198 } 3199 3200 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 3201 { 3202 struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 3203 int ret; 3204 int i; 3205 3206 /* set default specifications as devices lower than version V3 do not 3207 * support querying specifications from firmware. 
3208 */ 3209 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 3210 hclgevf_set_default_dev_specs(hdev); 3211 return 0; 3212 } 3213 3214 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3215 hclgevf_cmd_setup_basic_desc(&desc[i], 3216 HCLGEVF_OPC_QUERY_DEV_SPECS, true); 3217 desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT); 3218 } 3219 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, 3220 true); 3221 3222 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 3223 if (ret) 3224 return ret; 3225 3226 hclgevf_parse_dev_specs(hdev, desc); 3227 hclgevf_check_dev_specs(hdev); 3228 3229 return 0; 3230 } 3231 3232 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 3233 { 3234 struct pci_dev *pdev = hdev->pdev; 3235 int ret = 0; 3236 3237 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 3238 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3239 hclgevf_misc_irq_uninit(hdev); 3240 hclgevf_uninit_msi(hdev); 3241 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3242 } 3243 3244 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3245 pci_set_master(pdev); 3246 ret = hclgevf_init_msi(hdev); 3247 if (ret) { 3248 dev_err(&pdev->dev, 3249 "failed(%d) to init MSI/MSI-X\n", ret); 3250 return ret; 3251 } 3252 3253 ret = hclgevf_misc_irq_init(hdev); 3254 if (ret) { 3255 hclgevf_uninit_msi(hdev); 3256 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 3257 ret); 3258 return ret; 3259 } 3260 3261 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3262 } 3263 3264 return ret; 3265 } 3266 3267 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 3268 { 3269 struct hclge_vf_to_pf_msg send_msg; 3270 3271 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 3272 HCLGE_MBX_VPORT_LIST_CLEAR); 3273 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3274 } 3275 3276 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) 3277 { 3278 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 3279 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); 3280 } 3281 3282 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) 3283 { 3284 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 3285 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); 3286 } 3287 3288 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 3289 { 3290 struct pci_dev *pdev = hdev->pdev; 3291 int ret; 3292 3293 ret = hclgevf_pci_reset(hdev); 3294 if (ret) { 3295 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 3296 return ret; 3297 } 3298 3299 ret = hclgevf_cmd_init(hdev); 3300 if (ret) { 3301 dev_err(&pdev->dev, "cmd failed %d\n", ret); 3302 return ret; 3303 } 3304 3305 ret = hclgevf_rss_init_hw(hdev); 3306 if (ret) { 3307 dev_err(&hdev->pdev->dev, 3308 "failed(%d) to initialize RSS\n", ret); 3309 return ret; 3310 } 3311 3312 ret = hclgevf_config_gro(hdev, true); 3313 if (ret) 3314 return ret; 3315 3316 ret = hclgevf_init_vlan_config(hdev); 3317 if (ret) { 3318 dev_err(&hdev->pdev->dev, 3319 "failed(%d) to initialize VLAN config\n", ret); 3320 return ret; 3321 } 3322 3323 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3324 3325 hclgevf_init_rxd_adv_layout(hdev); 3326 3327 dev_info(&hdev->pdev->dev, "Reset done\n"); 3328 3329 return 0; 3330 } 3331 3332 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 3333 { 3334 struct pci_dev *pdev = hdev->pdev; 3335 int ret; 3336 3337 ret = hclgevf_pci_init(hdev); 3338 if (ret) 3339 return ret; 3340 3341 ret = hclgevf_devlink_init(hdev); 3342 if (ret) 
		goto err_devlink_init;

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret)
		goto err_cmd_queue_init;

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_query_dev_specs(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to query dev specifications, ret = %d\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;
	hdev->reset_type = HNAE3_NONE_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret)
		goto err_misc_irq_init;

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret)
		goto err_config;

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_cfg(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
		goto err_config;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	/* ensure the VF table list is empty before init */
	ret = hclgevf_clear_vport_list(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to clear tbl list configuration, ret = %d.\n",
			ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hclgevf_init_rxd_adv_layout(hdev);

	hdev->last_reset_time = jiffies;
	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
		 HCLGEVF_DRIVER_NAME);

	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_devlink_uninit(hdev);
err_devlink_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_state_uninit(hdev);
	hclgevf_uninit_rxd_adv_layout(hdev);

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_cmd_uninit(hdev);
	hclgevf_devlink_uninit(hdev);
	hclgevf_pci_uninit(hdev);
	hclgevf_uninit_mac_list(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
3480 int ret; 3481 3482 ret = hclgevf_alloc_hdev(ae_dev); 3483 if (ret) { 3484 dev_err(&pdev->dev, "hclge device allocation failed\n"); 3485 return ret; 3486 } 3487 3488 ret = hclgevf_init_hdev(ae_dev->priv); 3489 if (ret) { 3490 dev_err(&pdev->dev, "hclge device initialization failed\n"); 3491 return ret; 3492 } 3493 3494 return 0; 3495 } 3496 3497 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3498 { 3499 struct hclgevf_dev *hdev = ae_dev->priv; 3500 3501 hclgevf_uninit_hdev(hdev); 3502 ae_dev->priv = NULL; 3503 } 3504 3505 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3506 { 3507 struct hnae3_handle *nic = &hdev->nic; 3508 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3509 3510 return min_t(u32, hdev->rss_size_max, 3511 hdev->num_tqps / kinfo->tc_info.num_tc); 3512 } 3513 3514 /** 3515 * hclgevf_get_channels - Get the current channels enabled and max supported. 3516 * @handle: hardware information for network interface 3517 * @ch: ethtool channels structure 3518 * 3519 * We don't support separate tx and rx queues as channels. The other count 3520 * represents how many queues are being used for control. max_combined counts 3521 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3522 * q_vectors since we support a lot more queue pairs than q_vectors. 3523 **/ 3524 static void hclgevf_get_channels(struct hnae3_handle *handle, 3525 struct ethtool_channels *ch) 3526 { 3527 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3528 3529 ch->max_combined = hclgevf_get_max_channels(hdev); 3530 ch->other_count = 0; 3531 ch->max_other = 0; 3532 ch->combined_count = handle->kinfo.rss_size; 3533 } 3534 3535 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3536 u16 *alloc_tqps, u16 *max_rss_size) 3537 { 3538 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3539 3540 *alloc_tqps = hdev->num_tqps; 3541 *max_rss_size = hdev->rss_size_max; 3542 } 3543 3544 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3545 u32 new_tqps_num) 3546 { 3547 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3548 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3549 u16 max_rss_size; 3550 3551 kinfo->req_rss_size = new_tqps_num; 3552 3553 max_rss_size = min_t(u16, hdev->rss_size_max, 3554 hdev->num_tqps / kinfo->tc_info.num_tc); 3555 3556 /* Use the user's configuration when it is not larger than 3557 * max_rss_size, otherwise, use the maximum specification value. 
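	 * A req_rss_size of zero means no explicit user request; rss_size
	 * then simply follows max_rss_size.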
3558 */ 3559 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 3560 kinfo->req_rss_size <= max_rss_size) 3561 kinfo->rss_size = kinfo->req_rss_size; 3562 else if (kinfo->rss_size > max_rss_size || 3563 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3564 kinfo->rss_size = max_rss_size; 3565 3566 kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 3567 } 3568 3569 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 3570 bool rxfh_configured) 3571 { 3572 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3573 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3574 u16 cur_rss_size = kinfo->rss_size; 3575 u16 cur_tqps = kinfo->num_tqps; 3576 u32 *rss_indir; 3577 unsigned int i; 3578 int ret; 3579 3580 hclgevf_update_rss_size(handle, new_tqps_num); 3581 3582 ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 3583 if (ret) 3584 return ret; 3585 3586 /* RSS indirection table has been configured by user */ 3587 if (rxfh_configured) 3588 goto out; 3589 3590 /* Reinitializes the rss indirect table according to the new RSS size */ 3591 rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 3592 sizeof(u32), GFP_KERNEL); 3593 if (!rss_indir) 3594 return -ENOMEM; 3595 3596 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 3597 rss_indir[i] = i % kinfo->rss_size; 3598 3599 hdev->rss_cfg.rss_size = kinfo->rss_size; 3600 3601 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 3602 if (ret) 3603 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 3604 ret); 3605 3606 kfree(rss_indir); 3607 3608 out: 3609 if (!ret) 3610 dev_info(&hdev->pdev->dev, 3611 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 3612 cur_rss_size, kinfo->rss_size, 3613 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 3614 3615 return ret; 3616 } 3617 3618 static int hclgevf_get_status(struct hnae3_handle *handle) 3619 { 3620 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3621 3622 return hdev->hw.mac.link; 3623 } 3624 3625 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3626 u8 *auto_neg, u32 *speed, 3627 u8 *duplex) 3628 { 3629 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3630 3631 if (speed) 3632 *speed = hdev->hw.mac.speed; 3633 if (duplex) 3634 *duplex = hdev->hw.mac.duplex; 3635 if (auto_neg) 3636 *auto_neg = AUTONEG_DISABLE; 3637 } 3638 3639 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3640 u8 duplex) 3641 { 3642 hdev->hw.mac.speed = speed; 3643 hdev->hw.mac.duplex = duplex; 3644 } 3645 3646 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3647 { 3648 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3649 3650 return hclgevf_config_gro(hdev, enable); 3651 } 3652 3653 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3654 u8 *module_type) 3655 { 3656 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3657 3658 if (media_type) 3659 *media_type = hdev->hw.mac.media_type; 3660 3661 if (module_type) 3662 *module_type = hdev->hw.mac.module_type; 3663 } 3664 3665 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3666 { 3667 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3668 3669 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3670 } 3671 3672 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3673 { 3674 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3675 3676 return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 3677 } 3678 
static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_num, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetching per-VF register values from the VF PCIe register space */
	reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	rtnl_lock();

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
		dev_warn(&hdev->pdev->dev,
			 "is resetting when updating port based vlan info\n");
		rtnl_unlock();
		return;
	}

	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret) {
		rtnl_unlock();
		return;
	}

	/* send msg to PF and wait for it to update the port based vlan info */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan_info, data_size);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (!ret) {
		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
			nic->port_base_vlan_state = state;
		else
			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.reset_prepare = hclgevf_reset_prepare_general,
	.reset_done = hclgevf_reset_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_vlan_filter = hclgevf_enable_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.set_channels = hclgevf_set_channels,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
	.get_cmdq_stat = hclgevf_get_cmdq_stat,
};
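/* Glue between this VF driver and the hnae3 framework: the framework
 * matches ae_algovf against the IDs in ae_algovf_pci_tbl and then drives
 * the device through hclgevf_ops.
 */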
3866 3867 static struct hnae3_ae_algo ae_algovf = { 3868 .ops = &hclgevf_ops, 3869 .pdev_id_table = ae_algovf_pci_tbl, 3870 }; 3871 3872 static int hclgevf_init(void) 3873 { 3874 pr_info("%s is initializing\n", HCLGEVF_NAME); 3875 3876 hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); 3877 if (!hclgevf_wq) { 3878 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3879 return -ENOMEM; 3880 } 3881 3882 hnae3_register_ae_algo(&ae_algovf); 3883 3884 return 0; 3885 } 3886 3887 static void hclgevf_exit(void) 3888 { 3889 hnae3_unregister_ae_algo(&ae_algovf); 3890 destroy_workqueue(hclgevf_wq); 3891 } 3892 module_init(hclgevf_init); 3893 module_exit(hclgevf_exit); 3894 3895 MODULE_LICENSE("GPL"); 3896 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3897 MODULE_DESCRIPTION("HCLGEVF Driver"); 3898 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3899