1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclge_mbx.h" 10 #include "hnae3.h" 11 12 #define HCLGEVF_NAME "hclgevf" 13 14 #define HCLGEVF_RESET_MAX_FAIL_CNT 5 15 16 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 17 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 18 unsigned long delay); 19 20 static struct hnae3_ae_algo ae_algovf; 21 22 static struct workqueue_struct *hclgevf_wq; 23 24 static const struct pci_device_id ae_algovf_pci_tbl[] = { 25 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 26 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 27 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 28 /* required last entry */ 29 {0, } 30 }; 31 32 static const u8 hclgevf_hash_key[] = { 33 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 34 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 35 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 36 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 37 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 38 }; 39 40 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 41 42 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, 43 HCLGEVF_CMDQ_TX_ADDR_H_REG, 44 HCLGEVF_CMDQ_TX_DEPTH_REG, 45 HCLGEVF_CMDQ_TX_TAIL_REG, 46 HCLGEVF_CMDQ_TX_HEAD_REG, 47 HCLGEVF_CMDQ_RX_ADDR_L_REG, 48 HCLGEVF_CMDQ_RX_ADDR_H_REG, 49 HCLGEVF_CMDQ_RX_DEPTH_REG, 50 HCLGEVF_CMDQ_RX_TAIL_REG, 51 HCLGEVF_CMDQ_RX_HEAD_REG, 52 HCLGEVF_VECTOR0_CMDQ_SRC_REG, 53 HCLGEVF_VECTOR0_CMDQ_STATE_REG, 54 HCLGEVF_CMDQ_INTR_EN_REG, 55 HCLGEVF_CMDQ_INTR_GEN_REG}; 56 57 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, 58 HCLGEVF_RST_ING, 59 HCLGEVF_GRO_EN_REG}; 60 61 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, 62 HCLGEVF_RING_RX_ADDR_H_REG, 63 HCLGEVF_RING_RX_BD_NUM_REG, 64 HCLGEVF_RING_RX_BD_LENGTH_REG, 65 HCLGEVF_RING_RX_MERGE_EN_REG, 66 HCLGEVF_RING_RX_TAIL_REG, 67 HCLGEVF_RING_RX_HEAD_REG, 68 HCLGEVF_RING_RX_FBD_NUM_REG, 69 HCLGEVF_RING_RX_OFFSET_REG, 70 HCLGEVF_RING_RX_FBD_OFFSET_REG, 71 HCLGEVF_RING_RX_STASH_REG, 72 HCLGEVF_RING_RX_BD_ERR_REG, 73 HCLGEVF_RING_TX_ADDR_L_REG, 74 HCLGEVF_RING_TX_ADDR_H_REG, 75 HCLGEVF_RING_TX_BD_NUM_REG, 76 HCLGEVF_RING_TX_PRIORITY_REG, 77 HCLGEVF_RING_TX_TC_REG, 78 HCLGEVF_RING_TX_MERGE_EN_REG, 79 HCLGEVF_RING_TX_TAIL_REG, 80 HCLGEVF_RING_TX_HEAD_REG, 81 HCLGEVF_RING_TX_FBD_NUM_REG, 82 HCLGEVF_RING_TX_OFFSET_REG, 83 HCLGEVF_RING_TX_EBD_NUM_REG, 84 HCLGEVF_RING_TX_EBD_OFFSET_REG, 85 HCLGEVF_RING_TX_BD_ERR_REG, 86 HCLGEVF_RING_EN_REG}; 87 88 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, 89 HCLGEVF_TQP_INTR_GL0_REG, 90 HCLGEVF_TQP_INTR_GL1_REG, 91 HCLGEVF_TQP_INTR_GL2_REG, 92 HCLGEVF_TQP_INTR_RL_REG}; 93 94 static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 95 { 96 if (!handle->client) 97 return container_of(handle, struct hclgevf_dev, nic); 98 else if (handle->client->type == HNAE3_CLIENT_ROCE) 99 return container_of(handle, struct hclgevf_dev, roce); 100 else 101 return container_of(handle, struct hclgevf_dev, nic); 102 } 103 104 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) 105 { 106 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 107 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 108 struct hclgevf_desc desc; 109 struct hclgevf_tqp *tqp; 110 int status; 111 int i; 112 113 for (i = 0; i < kinfo->num_tqps; i++) { 114 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 115 hclgevf_cmd_setup_basic_desc(&desc, 116 HCLGEVF_OPC_QUERY_RX_STATUS, 117 true); 118 119 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 120 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 121 if (status) { 122 dev_err(&hdev->pdev->dev, 123 "Query tqp stat fail, status = %d,queue = %d\n", 124 status, i); 125 return status; 126 } 127 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 128 le32_to_cpu(desc.data[1]); 129 130 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 131 true); 132 133 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); 134 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 135 if (status) { 136 dev_err(&hdev->pdev->dev, 137 "Query tqp stat fail, status = %d,queue = %d\n", 138 status, i); 139 return status; 140 } 141 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 142 le32_to_cpu(desc.data[1]); 143 } 144 145 return 0; 146 } 147 148 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 149 { 150 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 151 struct hclgevf_tqp *tqp; 152 u64 *buff = data; 153 int i; 154 155 for (i = 0; i < kinfo->num_tqps; i++) { 156 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 157 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 158 } 159 for (i = 0; i < kinfo->num_tqps; i++) { 160 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 161 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 162 } 163 164 return buff; 165 } 166 167 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) 168 { 169 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 170 171 return kinfo->num_tqps * 2; 172 } 173 174 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 175 { 176 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 177 u8 *buff = data; 178 int i; 179 180 for (i = 0; i < kinfo->num_tqps; i++) { 181 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 182 struct hclgevf_tqp, q); 183 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", 184 tqp->index); 185 buff += ETH_GSTRING_LEN; 186 } 187 188 for (i = 0; i < kinfo->num_tqps; i++) { 189 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], 190 struct hclgevf_tqp, q); 191 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", 192 tqp->index); 193 buff += ETH_GSTRING_LEN; 194 } 195 196 return buff; 197 } 198 199 static void hclgevf_update_stats(struct hnae3_handle *handle, 200 struct net_device_stats *net_stats) 201 { 202 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 203 int status; 204 205 status = hclgevf_tqps_update_stats(handle); 206 if (status) 207 dev_err(&hdev->pdev->dev, 208 "VF update of TQPS stats fail, status = %d.\n", 209 status); 210 } 211 212 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 213 { 214 if (strset == ETH_SS_TEST) 215 return -EOPNOTSUPP; 216 else if (strset == ETH_SS_STATS) 217 return hclgevf_tqps_get_sset_count(handle, strset); 218 219 return 0; 220 } 221 222 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 223 u8 *data) 224 { 225 u8 *p = (char *)data; 226 227 if (strset == ETH_SS_STATS) 228 p = hclgevf_tqps_get_strings(handle, p); 229 } 230 231 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 232 { 233 hclgevf_tqps_get_stats(handle, data); 234 } 235 236 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, 237 u8 subcode) 238 { 239 if (msg) { 240 memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); 241 msg->code = code; 242 msg->subcode = subcode; 243 } 244 } 245 246 static int hclgevf_get_basic_info(struct hclgevf_dev *hdev) 247 { 248 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 249 u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE]; 250 struct hclge_basic_info *basic_info; 251 struct hclge_vf_to_pf_msg send_msg; 252 unsigned long caps; 253 int status; 254 255 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0); 256 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 257 sizeof(resp_msg)); 258 if (status) { 259 dev_err(&hdev->pdev->dev, 260 "failed to get basic info from pf, ret = %d", status); 261 return status; 262 } 263 264 basic_info = (struct hclge_basic_info *)resp_msg; 265 266 hdev->hw_tc_map = basic_info->hw_tc_map; 267 hdev->mbx_api_version = basic_info->mbx_api_version; 268 caps = basic_info->pf_caps; 269 if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps)) 270 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); 271 272 return 0; 273 } 274 275 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 276 { 277 struct hnae3_handle *nic = &hdev->nic; 278 struct hclge_vf_to_pf_msg send_msg; 279 u8 resp_msg; 280 int ret; 281 282 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 283 HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); 284 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 285 sizeof(u8)); 286 if (ret) { 287 dev_err(&hdev->pdev->dev, 288 "VF request to get port based vlan state failed %d", 289 ret); 290 return ret; 291 } 292 293 nic->port_base_vlan_state = resp_msg; 294 295 return 0; 296 } 297 298 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 299 { 300 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 301 #define HCLGEVF_TQPS_ALLOC_OFFSET 0 302 #define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 303 #define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 304 305 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 306 struct hclge_vf_to_pf_msg send_msg; 307 int status; 308 309 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); 310 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 311 HCLGEVF_TQPS_RSS_INFO_LEN); 312 if (status) { 313 dev_err(&hdev->pdev->dev, 314 "VF request to get tqp info from PF failed %d", 315 status); 316 return status; 317 } 318 319 memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], 320 sizeof(u16)); 321 memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], 322 sizeof(u16)); 323 memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], 324 sizeof(u16)); 325 326 return 0; 327 } 328 329 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 330 { 331 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 332 #define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 333 #define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 334 335 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 336 struct hclge_vf_to_pf_msg send_msg; 337 int ret; 338 339 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); 340 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 341 HCLGEVF_TQPS_DEPTH_INFO_LEN); 342 if (ret) { 343 dev_err(&hdev->pdev->dev, 344 "VF request to get tqp depth info from PF failed %d", 345 ret); 346 return ret; 347 } 348 349 memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], 350 sizeof(u16)); 351 memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], 352 sizeof(u16)); 353 354 return 0; 355 } 356 357 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 358 { 359 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 360 struct hclge_vf_to_pf_msg send_msg; 361 u16 qid_in_pf = 0; 362 u8 resp_data[2]; 363 int ret; 364 365 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); 366 memcpy(send_msg.data, &queue_id, sizeof(queue_id)); 367 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, 368 sizeof(resp_data)); 369 if (!ret) 370 qid_in_pf = *(u16 *)resp_data; 371 372 return qid_in_pf; 373 } 374 375 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 376 { 377 struct hclge_vf_to_pf_msg send_msg; 378 u8 resp_msg[2]; 379 int ret; 380 381 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); 382 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 383 sizeof(resp_msg)); 384 if (ret) { 385 dev_err(&hdev->pdev->dev, 386 "VF request to get the pf port media type failed %d", 387 ret); 388 return ret; 389 } 390 391 hdev->hw.mac.media_type = resp_msg[0]; 392 hdev->hw.mac.module_type = resp_msg[1]; 393 394 return 0; 395 } 396 397 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 398 { 399 struct hclgevf_tqp *tqp; 400 int i; 401 402 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 403 sizeof(struct hclgevf_tqp), GFP_KERNEL); 404 if (!hdev->htqp) 405 return -ENOMEM; 406 407 tqp = hdev->htqp; 408 409 for (i = 0; i < hdev->num_tqps; i++) { 410 tqp->dev = &hdev->pdev->dev; 411 tqp->index = i; 412 413 tqp->q.ae_algo = &ae_algovf; 414 tqp->q.buf_size = hdev->rx_buf_len; 415 tqp->q.tx_desc_num = hdev->num_tx_desc; 416 tqp->q.rx_desc_num = hdev->num_rx_desc; 417 418 /* need an extended offset to configure queues >= 419 * HCLGEVF_TQP_MAX_SIZE_DEV_V2. 420 */ 421 if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) 422 tqp->q.io_base = hdev->hw.io_base + 423 HCLGEVF_TQP_REG_OFFSET + 424 i * HCLGEVF_TQP_REG_SIZE; 425 else 426 tqp->q.io_base = hdev->hw.io_base + 427 HCLGEVF_TQP_REG_OFFSET + 428 HCLGEVF_TQP_EXT_REG_OFFSET + 429 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * 430 HCLGEVF_TQP_REG_SIZE; 431 432 tqp++; 433 } 434 435 return 0; 436 } 437 438 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 439 { 440 struct hnae3_handle *nic = &hdev->nic; 441 struct hnae3_knic_private_info *kinfo; 442 u16 new_tqps = hdev->num_tqps; 443 unsigned int i; 444 u8 num_tc = 0; 445 446 kinfo = &nic->kinfo; 447 kinfo->num_tx_desc = hdev->num_tx_desc; 448 kinfo->num_rx_desc = hdev->num_rx_desc; 449 kinfo->rx_buf_len = hdev->rx_buf_len; 450 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 451 if (hdev->hw_tc_map & BIT(i)) 452 num_tc++; 453 454 num_tc = num_tc ? num_tc : 1; 455 kinfo->tc_info.num_tc = num_tc; 456 kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); 457 new_tqps = kinfo->rss_size * num_tc; 458 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 459 460 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 461 sizeof(struct hnae3_queue *), GFP_KERNEL); 462 if (!kinfo->tqp) 463 return -ENOMEM; 464 465 for (i = 0; i < kinfo->num_tqps; i++) { 466 hdev->htqp[i].q.handle = &hdev->nic; 467 hdev->htqp[i].q.tqp_index = i; 468 kinfo->tqp[i] = &hdev->htqp[i].q; 469 } 470 471 /* after init the max rss_size and tqps, adjust the default tqp numbers 472 * and rss size with the actual vector numbers 473 */ 474 kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 475 kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, 476 kinfo->rss_size); 477 478 return 0; 479 } 480 481 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 482 { 483 struct hclge_vf_to_pf_msg send_msg; 484 int status; 485 486 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); 487 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 488 if (status) 489 dev_err(&hdev->pdev->dev, 490 "VF failed to fetch link status(%d) from PF", status); 491 } 492 493 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 494 { 495 struct hnae3_handle *rhandle = &hdev->roce; 496 struct hnae3_handle *handle = &hdev->nic; 497 struct hnae3_client *rclient; 498 struct hnae3_client *client; 499 500 if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 501 return; 502 503 client = handle->client; 504 rclient = hdev->roce_client; 505 506 link_state = 507 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 508 if (link_state != hdev->hw.mac.link) { 509 client->ops->link_status_change(handle, !!link_state); 510 if (rclient && rclient->ops->link_status_change) 511 rclient->ops->link_status_change(rhandle, !!link_state); 512 hdev->hw.mac.link = link_state; 513 } 514 515 clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 516 } 517 518 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 519 { 520 #define HCLGEVF_ADVERTISING 0 521 #define HCLGEVF_SUPPORTED 1 522 523 struct hclge_vf_to_pf_msg send_msg; 524 525 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 526 send_msg.data[0] = HCLGEVF_ADVERTISING; 527 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 528 send_msg.data[0] = HCLGEVF_SUPPORTED; 529 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 530 } 531 532 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 533 { 534 struct hnae3_handle *nic = &hdev->nic; 535 int ret; 536 537 nic->ae_algo = &ae_algovf; 538 nic->pdev = hdev->pdev; 539 nic->numa_node_mask = hdev->numa_node_mask; 540 nic->flags |= HNAE3_SUPPORT_VF; 541 542 ret = hclgevf_knic_setup(hdev); 543 if (ret) 544 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 545 ret); 546 return ret; 547 } 548 549 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 550 { 551 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 552 dev_warn(&hdev->pdev->dev, 553 "vector(vector_id %d) has been freed.\n", vector_id); 554 return; 555 } 556 557 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 558 hdev->num_msi_left += 1; 559 hdev->num_msi_used -= 1; 560 } 561 562 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 563 struct hnae3_vector_info *vector_info) 564 { 565 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 566 struct hnae3_vector_info *vector = vector_info; 567 int alloc = 0; 568 int i, j; 569 570 vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 571 vector_num = min(hdev->num_msi_left, vector_num); 572 573 for (j = 0; j < vector_num; j++) { 574 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 575 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 576 vector->vector = pci_irq_vector(hdev->pdev, i); 577 vector->io_addr = hdev->hw.io_base + 578 HCLGEVF_VECTOR_REG_BASE + 579 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 580 hdev->vector_status[i] = 0; 581 hdev->vector_irq[i] = vector->vector; 582 583 vector++; 584 alloc++; 585 586 break; 587 } 588 } 589 } 590 hdev->num_msi_left -= alloc; 591 hdev->num_msi_used += alloc; 592 593 return alloc; 594 } 595 596 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 597 { 598 int i; 599 600 for (i = 0; i < hdev->num_msi; i++) 601 if (vector == hdev->vector_irq[i]) 602 return i; 603 604 return -EINVAL; 605 } 606 607 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, 608 const u8 hfunc, const u8 *key) 609 { 610 struct hclgevf_rss_config_cmd *req; 611 unsigned int key_offset = 0; 612 struct hclgevf_desc desc; 613 int key_counts; 614 int key_size; 615 int ret; 616 617 key_counts = HCLGEVF_RSS_KEY_SIZE; 618 req = (struct hclgevf_rss_config_cmd *)desc.data; 619 620 while (key_counts) { 621 hclgevf_cmd_setup_basic_desc(&desc, 622 HCLGEVF_OPC_RSS_GENERIC_CONFIG, 623 false); 624 625 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); 626 req->hash_config |= 627 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); 628 629 key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); 630 memcpy(req->hash_key, 631 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); 632 633 key_counts -= key_size; 634 key_offset++; 635 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 636 if (ret) { 637 dev_err(&hdev->pdev->dev, 638 "Configure RSS config fail, status = %d\n", 639 ret); 640 return ret; 641 } 642 } 643 644 return 0; 645 } 646 647 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) 648 { 649 return HCLGEVF_RSS_KEY_SIZE; 650 } 651 652 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) 653 { 654 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; 655 struct hclgevf_rss_indirection_table_cmd *req; 656 struct hclgevf_desc desc; 657 int rss_cfg_tbl_num; 658 int status; 659 int i, j; 660 661 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; 662 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size / 663 HCLGEVF_RSS_CFG_TBL_SIZE; 664 665 for (i = 0; i < rss_cfg_tbl_num; i++) { 666 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, 667 false); 668 req->start_table_index = 669 cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE); 670 req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK); 671 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) 672 req->rss_result[j] = 673 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; 674 675 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 676 if (status) { 677 dev_err(&hdev->pdev->dev, 678 "VF failed(=%d) to set RSS indirection table\n", 679 status); 680 return status; 681 } 682 } 683 684 return 0; 685 } 686 687 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) 688 { 689 struct hclgevf_rss_tc_mode_cmd *req; 690 u16 tc_offset[HCLGEVF_MAX_TC_NUM]; 691 u16 tc_valid[HCLGEVF_MAX_TC_NUM]; 692 u16 tc_size[HCLGEVF_MAX_TC_NUM]; 693 struct hclgevf_desc desc; 694 u16 roundup_size; 695 unsigned int i; 696 int status; 697 698 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; 699 700 roundup_size = roundup_pow_of_two(rss_size); 701 roundup_size = ilog2(roundup_size); 702 703 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 704 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); 705 tc_size[i] = roundup_size; 706 tc_offset[i] = rss_size * i; 707 } 708 709 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); 710 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { 711 u16 mode = 0; 712 713 hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B, 714 (tc_valid[i] & 0x1)); 715 hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M, 716 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); 717 hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B, 718 tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET & 719 0x1); 720 hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M, 721 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); 722 723 req->rss_tc_mode[i] = cpu_to_le16(mode); 724 } 725 status = hclgevf_cmd_send(&hdev->hw, &desc, 1); 726 if (status) 727 dev_err(&hdev->pdev->dev, 728 "VF failed(=%d) to set rss tc mode\n", status); 729 730 return status; 731 } 732 733 /* for revision 0x20, vf shared the same rss config with pf */ 734 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 735 { 736 #define HCLGEVF_RSS_MBX_RESP_LEN 8 737 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 738 u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 739 struct hclge_vf_to_pf_msg send_msg; 740 u16 msg_num, hash_key_index; 741 u8 index; 742 int ret; 743 744 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 745 msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 746 HCLGEVF_RSS_MBX_RESP_LEN; 747 for (index = 0; index < msg_num; index++) { 748 send_msg.data[0] = index; 749 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 750 HCLGEVF_RSS_MBX_RESP_LEN); 751 if (ret) { 752 dev_err(&hdev->pdev->dev, 753 "VF get rss hash key from PF failed, ret=%d", 754 ret); 755 return ret; 756 } 757 758 hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 759 if (index == msg_num - 1) 760 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 761 &resp_msg[0], 762 HCLGEVF_RSS_KEY_SIZE - hash_key_index); 763 else 764 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 765 &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 766 } 767 768 return 0; 769 } 770 771 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 772 u8 *hfunc) 773 { 774 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 775 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 776 int i, ret; 777 778 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 779 /* Get hash algorithm */ 780 if (hfunc) { 781 switch (rss_cfg->hash_algo) { 782 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: 783 *hfunc = ETH_RSS_HASH_TOP; 784 break; 785 case HCLGEVF_RSS_HASH_ALGO_SIMPLE: 786 *hfunc = ETH_RSS_HASH_XOR; 787 break; 788 default: 789 *hfunc = ETH_RSS_HASH_UNKNOWN; 790 break; 791 } 792 } 793 794 /* Get the RSS Key required by the user */ 795 if (key) 796 memcpy(key, rss_cfg->rss_hash_key, 797 HCLGEVF_RSS_KEY_SIZE); 798 } else { 799 if (hfunc) 800 *hfunc = ETH_RSS_HASH_TOP; 801 if (key) { 802 ret = hclgevf_get_rss_hash_key(hdev); 803 if (ret) 804 return ret; 805 memcpy(key, rss_cfg->rss_hash_key, 806 HCLGEVF_RSS_KEY_SIZE); 807 } 808 } 809 810 if (indir) 811 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 812 indir[i] = rss_cfg->rss_indirection_tbl[i]; 813 814 return 0; 815 } 816 817 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 818 const u8 *key, const u8 hfunc) 819 { 820 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 821 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 822 int ret, i; 823 824 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 825 /* Set the RSS Hash Key if specififed by the user */ 826 if (key) { 827 switch (hfunc) { 828 case ETH_RSS_HASH_TOP: 829 rss_cfg->hash_algo = 830 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 831 break; 832 case ETH_RSS_HASH_XOR: 833 rss_cfg->hash_algo = 834 HCLGEVF_RSS_HASH_ALGO_SIMPLE; 835 break; 836 case ETH_RSS_HASH_NO_CHANGE: 837 break; 838 default: 839 return -EINVAL; 840 } 841 842 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 843 key); 844 if (ret) 845 return ret; 846 847 /* Update the shadow RSS key with user specified qids */ 848 memcpy(rss_cfg->rss_hash_key, key, 849 HCLGEVF_RSS_KEY_SIZE); 850 } 851 } 852 853 /* update the shadow RSS table with user specified qids */ 854 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 855 rss_cfg->rss_indirection_tbl[i] = indir[i]; 856 857 /* update the hardware */ 858 return hclgevf_set_rss_indir_table(hdev); 859 } 860 861 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 862 { 863 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; 864 865 if (nfc->data & RXH_L4_B_2_3) 866 hash_sets |= HCLGEVF_D_PORT_BIT; 867 else 868 hash_sets &= ~HCLGEVF_D_PORT_BIT; 869 870 if (nfc->data & RXH_IP_SRC) 871 hash_sets |= HCLGEVF_S_IP_BIT; 872 else 873 hash_sets &= ~HCLGEVF_S_IP_BIT; 874 875 if (nfc->data & RXH_IP_DST) 876 hash_sets |= HCLGEVF_D_IP_BIT; 877 else 878 hash_sets &= ~HCLGEVF_D_IP_BIT; 879 880 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 881 hash_sets |= HCLGEVF_V_TAG_BIT; 882 883 return hash_sets; 884 } 885 886 static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle, 887 struct ethtool_rxnfc *nfc, 888 struct hclgevf_rss_input_tuple_cmd *req) 889 { 890 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 891 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 892 u8 tuple_sets; 893 894 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 895 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 896 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 897 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 898 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 899 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 900 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 901 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 902 903 tuple_sets = hclgevf_get_rss_hash_bits(nfc); 904 switch (nfc->flow_type) { 905 case TCP_V4_FLOW: 906 req->ipv4_tcp_en = tuple_sets; 907 break; 908 case TCP_V6_FLOW: 909 req->ipv6_tcp_en = tuple_sets; 910 break; 911 case UDP_V4_FLOW: 912 req->ipv4_udp_en = tuple_sets; 913 break; 914 case UDP_V6_FLOW: 915 req->ipv6_udp_en = tuple_sets; 916 break; 917 case SCTP_V4_FLOW: 918 req->ipv4_sctp_en = tuple_sets; 919 break; 920 case SCTP_V6_FLOW: 921 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && 922 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))) 923 return -EINVAL; 924 925 req->ipv6_sctp_en = tuple_sets; 926 break; 927 case IPV4_FLOW: 928 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 929 break; 930 case IPV6_FLOW: 931 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 932 break; 933 default: 934 return -EINVAL; 935 } 936 937 return 0; 938 } 939 940 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 941 struct ethtool_rxnfc *nfc) 942 { 943 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 944 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 945 struct hclgevf_rss_input_tuple_cmd *req; 946 struct hclgevf_desc desc; 947 int ret; 948 949 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 950 return -EOPNOTSUPP; 951 952 if (nfc->data & 953 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) 954 return -EINVAL; 955 956 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 957 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 958 959 ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req); 960 if (ret) { 961 dev_err(&hdev->pdev->dev, 962 "failed to init rss tuple cmd, ret = %d\n", ret); 963 return ret; 964 } 965 966 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 967 if (ret) { 968 dev_err(&hdev->pdev->dev, 969 "Set rss tuple fail, status = %d\n", ret); 970 return ret; 971 } 972 973 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 974 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 975 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 976 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 977 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 978 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 979 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 980 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 981 return 0; 982 } 983 984 static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev, 985 int flow_type, u8 *tuple_sets) 986 { 987 switch (flow_type) { 988 case TCP_V4_FLOW: 989 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en; 990 break; 991 case UDP_V4_FLOW: 992 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en; 993 break; 994 case TCP_V6_FLOW: 995 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en; 996 break; 997 case UDP_V6_FLOW: 998 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en; 999 break; 1000 case SCTP_V4_FLOW: 1001 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en; 1002 break; 1003 case SCTP_V6_FLOW: 1004 *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en; 1005 break; 1006 case IPV4_FLOW: 1007 case IPV6_FLOW: 1008 *tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; 1009 break; 1010 default: 1011 return -EINVAL; 1012 } 1013 1014 return 0; 1015 } 1016 1017 static u64 hclgevf_convert_rss_tuple(u8 tuple_sets) 1018 { 1019 u64 tuple_data = 0; 1020 1021 if (tuple_sets & HCLGEVF_D_PORT_BIT) 1022 tuple_data |= RXH_L4_B_2_3; 1023 if (tuple_sets & HCLGEVF_S_PORT_BIT) 1024 tuple_data |= RXH_L4_B_0_1; 1025 if (tuple_sets & HCLGEVF_D_IP_BIT) 1026 tuple_data |= RXH_IP_DST; 1027 if (tuple_sets & HCLGEVF_S_IP_BIT) 1028 tuple_data |= RXH_IP_SRC; 1029 1030 return tuple_data; 1031 } 1032 1033 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 1034 struct ethtool_rxnfc *nfc) 1035 { 1036 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1037 u8 tuple_sets; 1038 int ret; 1039 1040 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 1041 return -EOPNOTSUPP; 1042 1043 nfc->data = 0; 1044 1045 ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type, 1046 &tuple_sets); 1047 if (ret || !tuple_sets) 1048 return ret; 1049 1050 nfc->data = hclgevf_convert_rss_tuple(tuple_sets); 1051 1052 return 0; 1053 } 1054 1055 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, 1056 struct hclgevf_rss_cfg *rss_cfg) 1057 { 1058 struct hclgevf_rss_input_tuple_cmd *req; 1059 struct hclgevf_desc desc; 1060 int ret; 1061 1062 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); 1063 1064 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; 1065 1066 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; 1067 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; 1068 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; 1069 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; 1070 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; 1071 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; 1072 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; 1073 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; 1074 1075 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 1076 if (ret) 1077 dev_err(&hdev->pdev->dev, 1078 "Configure rss input fail, status = %d\n", ret); 1079 return ret; 1080 } 1081 1082 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 1083 { 1084 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1085 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 1086 1087 return rss_cfg->rss_size; 1088 } 1089 1090 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 1091 int vector_id, 1092 struct hnae3_ring_chain_node *ring_chain) 1093 { 1094 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1095 struct hclge_vf_to_pf_msg send_msg; 1096 struct hnae3_ring_chain_node *node; 1097 int status; 1098 int i = 0; 1099 1100 memset(&send_msg, 0, sizeof(send_msg)); 1101 send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 1102 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 1103 send_msg.vector_id = vector_id; 1104 1105 for (node = ring_chain; node; node = node->next) { 1106 send_msg.param[i].ring_type = 1107 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 1108 1109 send_msg.param[i].tqp_index = node->tqp_index; 1110 send_msg.param[i].int_gl_index = 1111 hnae3_get_field(node->int_gl_idx, 1112 HNAE3_RING_GL_IDX_M, 1113 HNAE3_RING_GL_IDX_S); 1114 1115 i++; 1116 if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 1117 send_msg.ring_num = i; 1118 1119 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 1120 NULL, 0); 1121 if (status) { 1122 dev_err(&hdev->pdev->dev, 1123 "Map TQP fail, status is %d.\n", 1124 status); 1125 return status; 1126 } 1127 i = 0; 1128 } 1129 } 1130 1131 return 0; 1132 } 1133 1134 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 1135 struct hnae3_ring_chain_node *ring_chain) 1136 { 1137 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1138 int vector_id; 1139 1140 vector_id = hclgevf_get_vector_index(hdev, vector); 1141 if (vector_id < 0) { 1142 dev_err(&handle->pdev->dev, 1143 "Get vector index fail. ret =%d\n", vector_id); 1144 return vector_id; 1145 } 1146 1147 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 1148 } 1149 1150 static int hclgevf_unmap_ring_from_vector( 1151 struct hnae3_handle *handle, 1152 int vector, 1153 struct hnae3_ring_chain_node *ring_chain) 1154 { 1155 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1156 int ret, vector_id; 1157 1158 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1159 return 0; 1160 1161 vector_id = hclgevf_get_vector_index(hdev, vector); 1162 if (vector_id < 0) { 1163 dev_err(&handle->pdev->dev, 1164 "Get vector index fail. ret =%d\n", vector_id); 1165 return vector_id; 1166 } 1167 1168 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 1169 if (ret) 1170 dev_err(&handle->pdev->dev, 1171 "Unmap ring from vector fail. vector=%d, ret =%d\n", 1172 vector_id, 1173 ret); 1174 1175 return ret; 1176 } 1177 1178 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 1179 { 1180 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1181 int vector_id; 1182 1183 vector_id = hclgevf_get_vector_index(hdev, vector); 1184 if (vector_id < 0) { 1185 dev_err(&handle->pdev->dev, 1186 "hclgevf_put_vector get vector index fail. ret =%d\n", 1187 vector_id); 1188 return vector_id; 1189 } 1190 1191 hclgevf_free_vector(hdev, vector_id); 1192 1193 return 0; 1194 } 1195 1196 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 1197 bool en_uc_pmc, bool en_mc_pmc, 1198 bool en_bc_pmc) 1199 { 1200 struct hnae3_handle *handle = &hdev->nic; 1201 struct hclge_vf_to_pf_msg send_msg; 1202 int ret; 1203 1204 memset(&send_msg, 0, sizeof(send_msg)); 1205 send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 1206 send_msg.en_bc = en_bc_pmc ? 1 : 0; 1207 send_msg.en_uc = en_uc_pmc ? 1 : 0; 1208 send_msg.en_mc = en_mc_pmc ? 1 : 0; 1209 send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC, 1210 &handle->priv_flags) ? 1 : 0; 1211 1212 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1213 if (ret) 1214 dev_err(&hdev->pdev->dev, 1215 "Set promisc mode fail, status is %d.\n", ret); 1216 1217 return ret; 1218 } 1219 1220 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 1221 bool en_mc_pmc) 1222 { 1223 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1224 bool en_bc_pmc; 1225 1226 en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; 1227 1228 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 1229 en_bc_pmc); 1230 } 1231 1232 static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 1233 { 1234 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1235 1236 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1237 hclgevf_task_schedule(hdev, 0); 1238 } 1239 1240 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 1241 { 1242 struct hnae3_handle *handle = &hdev->nic; 1243 bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 1244 bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 1245 int ret; 1246 1247 if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 1248 ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 1249 if (!ret) 1250 clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 1251 } 1252 } 1253 1254 static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, 1255 u16 stream_id, bool enable) 1256 { 1257 struct hclgevf_cfg_com_tqp_queue_cmd *req; 1258 struct hclgevf_desc desc; 1259 1260 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 1261 1262 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, 1263 false); 1264 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 1265 req->stream_id = cpu_to_le16(stream_id); 1266 if (enable) 1267 req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 1268 1269 return hclgevf_cmd_send(&hdev->hw, &desc, 1); 1270 } 1271 1272 static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable) 1273 { 1274 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1275 int ret; 1276 u16 i; 1277 1278 for (i = 0; i < handle->kinfo.num_tqps; i++) { 1279 ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable); 1280 if (ret) 1281 return ret; 1282 } 1283 1284 return 0; 1285 } 1286 1287 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) 1288 { 1289 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 1290 struct hclgevf_tqp *tqp; 1291 int i; 1292 1293 for (i = 0; i < kinfo->num_tqps; i++) { 1294 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); 1295 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 1296 } 1297 } 1298 1299 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 1300 { 1301 struct hclge_vf_to_pf_msg send_msg; 1302 u8 host_mac[ETH_ALEN]; 1303 int status; 1304 1305 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 1306 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 1307 ETH_ALEN); 1308 if (status) { 1309 dev_err(&hdev->pdev->dev, 1310 "fail to get VF MAC from host %d", status); 1311 return status; 1312 } 1313 1314 ether_addr_copy(p, host_mac); 1315 1316 return 0; 1317 } 1318 1319 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 1320 { 1321 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1322 u8 host_mac_addr[ETH_ALEN]; 1323 1324 if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 1325 return; 1326 1327 hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 1328 if (hdev->has_pf_mac) 1329 ether_addr_copy(p, host_mac_addr); 1330 else 1331 ether_addr_copy(p, hdev->hw.mac.mac_addr); 1332 } 1333 1334 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, 1335 bool is_first) 1336 { 1337 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1338 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 1339 struct hclge_vf_to_pf_msg send_msg; 1340 u8 *new_mac_addr = (u8 *)p; 1341 int status; 1342 1343 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 1344 send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; 1345 ether_addr_copy(send_msg.data, new_mac_addr); 1346 if (is_first && !hdev->has_pf_mac) 1347 eth_zero_addr(&send_msg.data[ETH_ALEN]); 1348 else 1349 ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 1350 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1351 if (!status) 1352 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 1353 1354 return status; 1355 } 1356 1357 static struct hclgevf_mac_addr_node * 1358 hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 1359 { 1360 struct hclgevf_mac_addr_node *mac_node, *tmp; 1361 1362 list_for_each_entry_safe(mac_node, tmp, list, node) 1363 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 1364 return mac_node; 1365 1366 return NULL; 1367 } 1368 1369 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 1370 enum HCLGEVF_MAC_NODE_STATE state) 1371 { 1372 switch (state) { 1373 /* from set_rx_mode or tmp_add_list */ 1374 case HCLGEVF_MAC_TO_ADD: 1375 if (mac_node->state == HCLGEVF_MAC_TO_DEL) 1376 mac_node->state = HCLGEVF_MAC_ACTIVE; 1377 break; 1378 /* only from set_rx_mode */ 1379 case HCLGEVF_MAC_TO_DEL: 1380 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1381 list_del(&mac_node->node); 1382 kfree(mac_node); 1383 } else { 1384 mac_node->state = HCLGEVF_MAC_TO_DEL; 1385 } 1386 break; 1387 /* only from tmp_add_list, the mac_node->state won't be 1388 * HCLGEVF_MAC_ACTIVE 1389 */ 1390 case HCLGEVF_MAC_ACTIVE: 1391 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1392 mac_node->state = HCLGEVF_MAC_ACTIVE; 1393 break; 1394 } 1395 } 1396 1397 static int hclgevf_update_mac_list(struct hnae3_handle *handle, 1398 enum HCLGEVF_MAC_NODE_STATE state, 1399 enum HCLGEVF_MAC_ADDR_TYPE mac_type, 1400 const unsigned char *addr) 1401 { 1402 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1403 struct hclgevf_mac_addr_node *mac_node; 1404 struct list_head *list; 1405 1406 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1407 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1408 1409 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1410 1411 /* if the mac addr is already in the mac list, no need to add a new 1412 * one into it, just check the mac addr state, convert it to a new 1413 * new state, or just remove it, or do nothing. 1414 */ 1415 mac_node = hclgevf_find_mac_node(list, addr); 1416 if (mac_node) { 1417 hclgevf_update_mac_node(mac_node, state); 1418 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1419 return 0; 1420 } 1421 /* if this address is never added, unnecessary to delete */ 1422 if (state == HCLGEVF_MAC_TO_DEL) { 1423 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1424 return -ENOENT; 1425 } 1426 1427 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 1428 if (!mac_node) { 1429 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1430 return -ENOMEM; 1431 } 1432 1433 mac_node->state = state; 1434 ether_addr_copy(mac_node->mac_addr, addr); 1435 list_add_tail(&mac_node->node, list); 1436 1437 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1438 return 0; 1439 } 1440 1441 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 1442 const unsigned char *addr) 1443 { 1444 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1445 HCLGEVF_MAC_ADDR_UC, addr); 1446 } 1447 1448 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1449 const unsigned char *addr) 1450 { 1451 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1452 HCLGEVF_MAC_ADDR_UC, addr); 1453 } 1454 1455 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1456 const unsigned char *addr) 1457 { 1458 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1459 HCLGEVF_MAC_ADDR_MC, addr); 1460 } 1461 1462 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1463 const unsigned char *addr) 1464 { 1465 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1466 HCLGEVF_MAC_ADDR_MC, addr); 1467 } 1468 1469 static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, 1470 struct hclgevf_mac_addr_node *mac_node, 1471 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1472 { 1473 struct hclge_vf_to_pf_msg send_msg; 1474 u8 code, subcode; 1475 1476 if (mac_type == HCLGEVF_MAC_ADDR_UC) { 1477 code = HCLGE_MBX_SET_UNICAST; 1478 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1479 subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; 1480 else 1481 subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; 1482 } else { 1483 code = HCLGE_MBX_SET_MULTICAST; 1484 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1485 subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; 1486 else 1487 subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; 1488 } 1489 1490 hclgevf_build_send_msg(&send_msg, code, subcode); 1491 ether_addr_copy(send_msg.data, mac_node->mac_addr); 1492 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1493 } 1494 1495 static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, 1496 struct list_head *list, 1497 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1498 { 1499 struct hclgevf_mac_addr_node *mac_node, *tmp; 1500 int ret; 1501 1502 list_for_each_entry_safe(mac_node, tmp, list, node) { 1503 ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); 1504 if (ret) { 1505 dev_err(&hdev->pdev->dev, 1506 "failed to configure mac %pM, state = %d, ret = %d\n", 1507 mac_node->mac_addr, mac_node->state, ret); 1508 return; 1509 } 1510 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1511 mac_node->state = HCLGEVF_MAC_ACTIVE; 1512 } else { 1513 list_del(&mac_node->node); 1514 kfree(mac_node); 1515 } 1516 } 1517 } 1518 1519 static void hclgevf_sync_from_add_list(struct list_head *add_list, 1520 struct list_head *mac_list) 1521 { 1522 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1523 1524 list_for_each_entry_safe(mac_node, tmp, add_list, node) { 1525 /* if the mac address from tmp_add_list is not in the 1526 * uc/mc_mac_list, it means have received a TO_DEL request 1527 * during the time window of sending mac config request to PF 1528 * If mac_node state is ACTIVE, then change its state to TO_DEL, 1529 * then it will be removed at next time. If is TO_ADD, it means 1530 * send TO_ADD request failed, so just remove the mac node. 1531 */ 1532 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1533 if (new_node) { 1534 hclgevf_update_mac_node(new_node, mac_node->state); 1535 list_del(&mac_node->node); 1536 kfree(mac_node); 1537 } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { 1538 mac_node->state = HCLGEVF_MAC_TO_DEL; 1539 list_del(&mac_node->node); 1540 list_add_tail(&mac_node->node, mac_list); 1541 } else { 1542 list_del(&mac_node->node); 1543 kfree(mac_node); 1544 } 1545 } 1546 } 1547 1548 static void hclgevf_sync_from_del_list(struct list_head *del_list, 1549 struct list_head *mac_list) 1550 { 1551 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1552 1553 list_for_each_entry_safe(mac_node, tmp, del_list, node) { 1554 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1555 if (new_node) { 1556 /* If the mac addr is exist in the mac list, it means 1557 * received a new request TO_ADD during the time window 1558 * of sending mac addr configurrequest to PF, so just 1559 * change the mac state to ACTIVE. 1560 */ 1561 new_node->state = HCLGEVF_MAC_ACTIVE; 1562 list_del(&mac_node->node); 1563 kfree(mac_node); 1564 } else { 1565 list_del(&mac_node->node); 1566 list_add_tail(&mac_node->node, mac_list); 1567 } 1568 } 1569 } 1570 1571 static void hclgevf_clear_list(struct list_head *list) 1572 { 1573 struct hclgevf_mac_addr_node *mac_node, *tmp; 1574 1575 list_for_each_entry_safe(mac_node, tmp, list, node) { 1576 list_del(&mac_node->node); 1577 kfree(mac_node); 1578 } 1579 } 1580 1581 static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, 1582 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1583 { 1584 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1585 struct list_head tmp_add_list, tmp_del_list; 1586 struct list_head *list; 1587 1588 INIT_LIST_HEAD(&tmp_add_list); 1589 INIT_LIST_HEAD(&tmp_del_list); 1590 1591 /* move the mac addr to the tmp_add_list and tmp_del_list, then 1592 * we can add/delete these mac addr outside the spin lock 1593 */ 1594 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1595 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1596 1597 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1598 1599 list_for_each_entry_safe(mac_node, tmp, list, node) { 1600 switch (mac_node->state) { 1601 case HCLGEVF_MAC_TO_DEL: 1602 list_del(&mac_node->node); 1603 list_add_tail(&mac_node->node, &tmp_del_list); 1604 break; 1605 case HCLGEVF_MAC_TO_ADD: 1606 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 1607 if (!new_node) 1608 goto stop_traverse; 1609 1610 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 1611 new_node->state = mac_node->state; 1612 list_add_tail(&new_node->node, &tmp_add_list); 1613 break; 1614 default: 1615 break; 1616 } 1617 } 1618 1619 stop_traverse: 1620 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1621 1622 /* delete first, in order to get max mac table space for adding */ 1623 hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); 1624 hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); 1625 1626 /* if some mac addresses were added/deleted fail, move back to the 1627 * mac_list, and retry at next time. 1628 */ 1629 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1630 1631 hclgevf_sync_from_del_list(&tmp_del_list, list); 1632 hclgevf_sync_from_add_list(&tmp_add_list, list); 1633 1634 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1635 } 1636 1637 static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) 1638 { 1639 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); 1640 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); 1641 } 1642 1643 static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) 1644 { 1645 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1646 1647 hclgevf_clear_list(&hdev->mac_table.uc_mac_list); 1648 hclgevf_clear_list(&hdev->mac_table.mc_mac_list); 1649 1650 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1651 } 1652 1653 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1654 __be16 proto, u16 vlan_id, 1655 bool is_kill) 1656 { 1657 #define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 1658 #define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 1659 #define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 1660 1661 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1662 struct hclge_vf_to_pf_msg send_msg; 1663 int ret; 1664 1665 if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1666 return -EINVAL; 1667 1668 if (proto != htons(ETH_P_8021Q)) 1669 return -EPROTONOSUPPORT; 1670 1671 /* When device is resetting or reset failed, firmware is unable to 1672 * handle mailbox. Just record the vlan id, and remove it after 1673 * reset finished. 1674 */ 1675 if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 1676 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { 1677 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1678 return -EBUSY; 1679 } 1680 1681 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1682 HCLGE_MBX_VLAN_FILTER); 1683 send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; 1684 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, 1685 sizeof(vlan_id)); 1686 memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, 1687 sizeof(proto)); 1688 /* when remove hw vlan filter failed, record the vlan id, 1689 * and try to remove it from hw later, to be consistence 1690 * with stack. 1691 */ 1692 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1693 if (is_kill && ret) 1694 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1695 1696 return ret; 1697 } 1698 1699 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1700 { 1701 #define HCLGEVF_MAX_SYNC_COUNT 60 1702 struct hnae3_handle *handle = &hdev->nic; 1703 int ret, sync_cnt = 0; 1704 u16 vlan_id; 1705 1706 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1707 while (vlan_id != VLAN_N_VID) { 1708 ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1709 vlan_id, true); 1710 if (ret) 1711 return; 1712 1713 clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1714 sync_cnt++; 1715 if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1716 return; 1717 1718 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1719 } 1720 } 1721 1722 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1723 { 1724 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1725 struct hclge_vf_to_pf_msg send_msg; 1726 1727 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1728 HCLGE_MBX_VLAN_RX_OFF_CFG); 1729 send_msg.data[0] = enable ? 1 : 0; 1730 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1731 } 1732 1733 static int hclgevf_reset_tqp(struct hnae3_handle *handle) 1734 { 1735 #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U 1736 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1737 struct hclge_vf_to_pf_msg send_msg; 1738 u8 return_status = 0; 1739 int ret; 1740 u16 i; 1741 1742 /* disable vf queue before send queue reset msg to PF */ 1743 ret = hclgevf_tqp_enable(handle, false); 1744 if (ret) { 1745 dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n", 1746 ret); 1747 return ret; 1748 } 1749 1750 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1751 1752 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status, 1753 sizeof(return_status)); 1754 if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE) 1755 return ret; 1756 1757 for (i = 1; i < handle->kinfo.num_tqps; i++) { 1758 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1759 memcpy(send_msg.data, &i, sizeof(i)); 1760 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1761 if (ret) 1762 return ret; 1763 } 1764 1765 return 0; 1766 } 1767 1768 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1769 { 1770 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1771 struct hclge_vf_to_pf_msg send_msg; 1772 1773 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); 1774 memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); 1775 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1776 } 1777 1778 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1779 enum hnae3_reset_notify_type type) 1780 { 1781 struct hnae3_client *client = hdev->nic_client; 1782 struct hnae3_handle *handle = &hdev->nic; 1783 int ret; 1784 1785 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 1786 !client) 1787 return 0; 1788 1789 if (!client->ops->reset_notify) 1790 return -EOPNOTSUPP; 1791 1792 ret = client->ops->reset_notify(handle, type); 1793 if (ret) 1794 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1795 type, ret); 1796 1797 return ret; 1798 } 1799 1800 static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, 1801 enum hnae3_reset_notify_type type) 1802 { 1803 struct hnae3_client *client = hdev->roce_client; 1804 struct hnae3_handle *handle = &hdev->roce; 1805 int ret; 1806 1807 if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) 1808 return 0; 1809 1810 if (!client->ops->reset_notify) 1811 return -EOPNOTSUPP; 1812 1813 ret = client->ops->reset_notify(handle, type); 1814 if (ret) 1815 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 1816 type, ret); 1817 return ret; 1818 } 1819 1820 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1821 { 1822 #define HCLGEVF_RESET_WAIT_US 20000 1823 #define HCLGEVF_RESET_WAIT_CNT 2000 1824 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1825 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1826 1827 u32 val; 1828 int ret; 1829 1830 if (hdev->reset_type == HNAE3_VF_RESET) 1831 ret = readl_poll_timeout(hdev->hw.io_base + 1832 HCLGEVF_VF_RST_ING, val, 1833 !(val & HCLGEVF_VF_RST_ING_BIT), 1834 HCLGEVF_RESET_WAIT_US, 1835 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1836 else 1837 ret = readl_poll_timeout(hdev->hw.io_base + 1838 HCLGEVF_RST_ING, val, 1839 !(val & HCLGEVF_RST_ING_BITS), 1840 HCLGEVF_RESET_WAIT_US, 1841 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1842 1843 /* hardware completion status should be available by this time */ 1844 if (ret) { 1845 dev_err(&hdev->pdev->dev, 1846 "couldn't get reset done status from h/w, timeout!\n"); 1847 return ret; 1848 } 1849 1850 /* we will wait a bit more to let reset of the stack to complete. This 1851 * might happen in case reset assertion was made by PF. Yes, this also 1852 * means we might end up waiting bit more even for VF reset. 1853 */ 1854 msleep(5000); 1855 1856 return 0; 1857 } 1858 1859 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 1860 { 1861 u32 reg_val; 1862 1863 reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); 1864 if (enable) 1865 reg_val |= HCLGEVF_NIC_SW_RST_RDY; 1866 else 1867 reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 1868 1869 hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 1870 reg_val); 1871 } 1872 1873 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1874 { 1875 int ret; 1876 1877 /* uninitialize the nic client */ 1878 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1879 if (ret) 1880 return ret; 1881 1882 /* re-initialize the hclge device */ 1883 ret = hclgevf_reset_hdev(hdev); 1884 if (ret) { 1885 dev_err(&hdev->pdev->dev, 1886 "hclge device re-init failed, VF is disabled!\n"); 1887 return ret; 1888 } 1889 1890 /* bring up the nic client again */ 1891 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1892 if (ret) 1893 return ret; 1894 1895 /* clear handshake status with IMP */ 1896 hclgevf_reset_handshake(hdev, false); 1897 1898 /* bring up the nic to enable TX/RX again */ 1899 return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1900 } 1901 1902 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1903 { 1904 #define HCLGEVF_RESET_SYNC_TIME 100 1905 1906 if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1907 struct hclge_vf_to_pf_msg send_msg; 1908 int ret; 1909 1910 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); 1911 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1912 if (ret) { 1913 dev_err(&hdev->pdev->dev, 1914 "failed to assert VF reset, ret = %d\n", ret); 1915 return ret; 1916 } 1917 hdev->rst_stats.vf_func_rst_cnt++; 1918 } 1919 1920 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 1921 /* inform hardware that preparatory work is done */ 1922 msleep(HCLGEVF_RESET_SYNC_TIME); 1923 hclgevf_reset_handshake(hdev, true); 1924 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", 1925 hdev->reset_type); 1926 1927 return 0; 1928 } 1929 1930 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 1931 { 1932 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 1933 hdev->rst_stats.vf_func_rst_cnt); 1934 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1935 hdev->rst_stats.flr_rst_cnt); 1936 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 1937 hdev->rst_stats.vf_rst_cnt); 1938 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1939 hdev->rst_stats.rst_done_cnt); 1940 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1941 hdev->rst_stats.hw_rst_done_cnt); 1942 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1943 hdev->rst_stats.rst_cnt); 1944 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1945 hdev->rst_stats.rst_fail_cnt); 1946 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1947 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 1948 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 1949 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG)); 1950 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1951 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); 1952 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1953 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 1954 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1955 } 1956 1957 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1958 { 1959 /* recover handshake status with IMP when reset fail */ 1960 hclgevf_reset_handshake(hdev, true); 1961 hdev->rst_stats.rst_fail_cnt++; 1962 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1963 hdev->rst_stats.rst_fail_cnt); 1964 1965 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1966 set_bit(hdev->reset_type, &hdev->reset_pending); 1967 1968 if (hclgevf_is_reset_pending(hdev)) { 1969 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1970 hclgevf_reset_task_schedule(hdev); 1971 } else { 1972 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1973 hclgevf_dump_rst_info(hdev); 1974 } 1975 } 1976 1977 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 1978 { 1979 int ret; 1980 1981 hdev->rst_stats.rst_cnt++; 1982 1983 /* perform reset of the stack & ae device for a client */ 1984 ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 1985 if (ret) 1986 return ret; 1987 1988 rtnl_lock(); 1989 /* bring down the nic to stop any ongoing TX/RX */ 1990 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1991 rtnl_unlock(); 1992 if (ret) 1993 return ret; 1994 1995 return hclgevf_reset_prepare_wait(hdev); 1996 } 1997 1998 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 1999 { 2000 int ret; 2001 2002 hdev->rst_stats.hw_rst_done_cnt++; 2003 ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 2004 if (ret) 2005 return ret; 2006 2007 rtnl_lock(); 2008 /* now, re-initialize the nic client and ae device */ 2009 ret = hclgevf_reset_stack(hdev); 2010 rtnl_unlock(); 2011 if (ret) { 2012 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 2013 return ret; 2014 } 2015 2016 ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 2017 /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1 2018 * times 2019 */ 2020 if (ret && 2021 hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) 2022 return ret; 2023 2024 ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); 2025 if (ret) 2026 return ret; 2027 2028 hdev->last_reset_time = jiffies; 2029 hdev->rst_stats.rst_done_cnt++; 2030 hdev->rst_stats.rst_fail_cnt = 0; 2031 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2032 2033 return 0; 2034 } 2035 2036 static void hclgevf_reset(struct hclgevf_dev *hdev) 2037 { 2038 if (hclgevf_reset_prepare(hdev)) 2039 goto err_reset; 2040 2041 /* check if VF could successfully fetch the hardware reset completion 2042 * status from the hardware 2043 */ 2044 if (hclgevf_reset_wait(hdev)) { 2045 /* can't do much in this situation, will disable VF */ 2046 dev_err(&hdev->pdev->dev, 2047 "failed to fetch H/W reset completion status\n"); 2048 goto err_reset; 2049 } 2050 2051 if (hclgevf_reset_rebuild(hdev)) 2052 goto err_reset; 2053 2054 return; 2055 2056 err_reset: 2057 hclgevf_reset_err_handle(hdev); 2058 } 2059 2060 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, 2061 unsigned long *addr) 2062 { 2063 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2064 2065 /* return the highest priority reset level amongst all */ 2066 if (test_bit(HNAE3_VF_RESET, addr)) { 2067 rst_level = HNAE3_VF_RESET; 2068 clear_bit(HNAE3_VF_RESET, addr); 2069 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2070 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2071 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 2072 rst_level = HNAE3_VF_FULL_RESET; 2073 clear_bit(HNAE3_VF_FULL_RESET, addr); 2074 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2075 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 2076 rst_level = HNAE3_VF_PF_FUNC_RESET; 2077 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 2078 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2079 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 2080 rst_level = HNAE3_VF_FUNC_RESET; 2081 clear_bit(HNAE3_VF_FUNC_RESET, addr); 2082 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 2083 rst_level = HNAE3_FLR_RESET; 2084 clear_bit(HNAE3_FLR_RESET, addr); 2085 } 2086 2087 return rst_level; 2088 } 2089 2090 static void hclgevf_reset_event(struct pci_dev *pdev, 2091 struct hnae3_handle *handle) 2092 { 2093 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2094 struct hclgevf_dev *hdev = ae_dev->priv; 2095 2096 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 2097 2098 if (hdev->default_reset_request) 2099 hdev->reset_level = 2100 hclgevf_get_reset_level(hdev, 2101 &hdev->default_reset_request); 2102 else 2103 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2104 2105 /* reset of this VF requested */ 2106 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 2107 hclgevf_reset_task_schedule(hdev); 2108 2109 hdev->last_reset_time = jiffies; 2110 } 2111 2112 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 2113 enum hnae3_reset_type rst_type) 2114 { 2115 struct hclgevf_dev *hdev = ae_dev->priv; 2116 2117 set_bit(rst_type, &hdev->default_reset_request); 2118 } 2119 2120 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 2121 { 2122 writel(en ? 1 : 0, vector->addr); 2123 } 2124 2125 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 2126 enum hnae3_reset_type rst_type) 2127 { 2128 #define HCLGEVF_RESET_RETRY_WAIT_MS 500 2129 #define HCLGEVF_RESET_RETRY_CNT 5 2130 2131 struct hclgevf_dev *hdev = ae_dev->priv; 2132 int retry_cnt = 0; 2133 int ret; 2134 2135 retry: 2136 down(&hdev->reset_sem); 2137 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2138 hdev->reset_type = rst_type; 2139 ret = hclgevf_reset_prepare(hdev); 2140 if (ret) { 2141 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n", 2142 ret); 2143 if (hdev->reset_pending || 2144 retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { 2145 dev_err(&hdev->pdev->dev, 2146 "reset_pending:0x%lx, retry_cnt:%d\n", 2147 hdev->reset_pending, retry_cnt); 2148 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2149 up(&hdev->reset_sem); 2150 msleep(HCLGEVF_RESET_RETRY_WAIT_MS); 2151 goto retry; 2152 } 2153 } 2154 2155 /* disable misc vector before reset done */ 2156 hclgevf_enable_vector(&hdev->misc_vector, false); 2157 2158 if (hdev->reset_type == HNAE3_FLR_RESET) 2159 hdev->rst_stats.flr_rst_cnt++; 2160 } 2161 2162 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) 2163 { 2164 struct hclgevf_dev *hdev = ae_dev->priv; 2165 int ret; 2166 2167 hclgevf_enable_vector(&hdev->misc_vector, true); 2168 2169 ret = hclgevf_reset_rebuild(hdev); 2170 if (ret) 2171 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 2172 ret); 2173 2174 hdev->reset_type = HNAE3_NONE_RESET; 2175 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2176 up(&hdev->reset_sem); 2177 } 2178 2179 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 2180 { 2181 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2182 2183 return hdev->fw_version; 2184 } 2185 2186 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 2187 { 2188 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 2189 2190 vector->vector_irq = pci_irq_vector(hdev->pdev, 2191 HCLGEVF_MISC_VECTOR_NUM); 2192 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 2193 /* vector status always valid for Vector 0 */ 2194 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 2195 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 2196 2197 hdev->num_msi_left -= 1; 2198 hdev->num_msi_used += 1; 2199 } 2200 2201 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 2202 { 2203 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2204 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 2205 &hdev->state)) 2206 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2207 } 2208 2209 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 2210 { 2211 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2212 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 2213 &hdev->state)) 2214 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 2215 } 2216 2217 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 2218 unsigned long delay) 2219 { 2220 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 2221 !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2222 mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); 2223 } 2224 2225 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) 2226 { 2227 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 2228 2229 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) 2230 return; 2231 2232 down(&hdev->reset_sem); 2233 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2234 2235 if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 2236 &hdev->reset_state)) { 2237 /* PF has intimated that it is about to reset the hardware. 2238 * We now have to poll & check if hardware has actually 2239 * completed the reset sequence. On hardware reset completion, 2240 * VF needs to reset the client and ae device. 2241 */ 2242 hdev->reset_attempts = 0; 2243 2244 hdev->last_reset_time = jiffies; 2245 while ((hdev->reset_type = 2246 hclgevf_get_reset_level(hdev, &hdev->reset_pending)) 2247 != HNAE3_NONE_RESET) 2248 hclgevf_reset(hdev); 2249 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 2250 &hdev->reset_state)) { 2251 /* we could be here when either of below happens: 2252 * 1. reset was initiated due to watchdog timeout caused by 2253 * a. IMP was earlier reset and our TX got choked down and 2254 * which resulted in watchdog reacting and inducing VF 2255 * reset. This also means our cmdq would be unreliable. 2256 * b. problem in TX due to other lower layer(example link 2257 * layer not functioning properly etc.) 2258 * 2. VF reset might have been initiated due to some config 2259 * change. 2260 * 2261 * NOTE: Theres no clear way to detect above cases than to react 2262 * to the response of PF for this reset request. PF will ack the 2263 * 1b and 2. cases but we will not get any intimation about 1a 2264 * from PF as cmdq would be in unreliable state i.e. mailbox 2265 * communication between PF and VF would be broken. 2266 * 2267 * if we are never geting into pending state it means either: 2268 * 1. PF is not receiving our request which could be due to IMP 2269 * reset 2270 * 2. PF is screwed 2271 * We cannot do much for 2. but to check first we can try reset 2272 * our PCIe + stack and see if it alleviates the problem. 2273 */ 2274 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 2275 /* prepare for full reset of stack + pcie interface */ 2276 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 2277 2278 /* "defer" schedule the reset task again */ 2279 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2280 } else { 2281 hdev->reset_attempts++; 2282 2283 set_bit(hdev->reset_level, &hdev->reset_pending); 2284 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2285 } 2286 hclgevf_reset_task_schedule(hdev); 2287 } 2288 2289 hdev->reset_type = HNAE3_NONE_RESET; 2290 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 2291 up(&hdev->reset_sem); 2292 } 2293 2294 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 2295 { 2296 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2297 return; 2298 2299 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 2300 return; 2301 2302 hclgevf_mbx_async_handler(hdev); 2303 2304 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2305 } 2306 2307 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 2308 { 2309 struct hclge_vf_to_pf_msg send_msg; 2310 int ret; 2311 2312 if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) 2313 return; 2314 2315 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 2316 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2317 if (ret) 2318 dev_err(&hdev->pdev->dev, 2319 "VF sends keep alive cmd failed(=%d)\n", ret); 2320 } 2321 2322 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 2323 { 2324 unsigned long delta = round_jiffies_relative(HZ); 2325 struct hnae3_handle *handle = &hdev->nic; 2326 2327 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 2328 return; 2329 2330 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 2331 delta = jiffies - hdev->last_serv_processed; 2332 2333 if (delta < round_jiffies_relative(HZ)) { 2334 delta = round_jiffies_relative(HZ) - delta; 2335 goto out; 2336 } 2337 } 2338 2339 hdev->serv_processed_cnt++; 2340 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 2341 hclgevf_keep_alive(hdev); 2342 2343 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 2344 hdev->last_serv_processed = jiffies; 2345 goto out; 2346 } 2347 2348 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 2349 hclgevf_tqps_update_stats(handle); 2350 2351 /* VF does not need to request link status when this bit is set, because 2352 * PF will push its link status to VFs when link status changed. 2353 */ 2354 if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) 2355 hclgevf_request_link_info(hdev); 2356 2357 hclgevf_update_link_mode(hdev); 2358 2359 hclgevf_sync_vlan_filter(hdev); 2360 2361 hclgevf_sync_mac_table(hdev); 2362 2363 hclgevf_sync_promisc_mode(hdev); 2364 2365 hdev->last_serv_processed = jiffies; 2366 2367 out: 2368 hclgevf_task_schedule(hdev, delta); 2369 } 2370 2371 static void hclgevf_service_task(struct work_struct *work) 2372 { 2373 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 2374 service_task.work); 2375 2376 hclgevf_reset_service_task(hdev); 2377 hclgevf_mailbox_service_task(hdev); 2378 hclgevf_periodic_service_task(hdev); 2379 2380 /* Handle reset and mbx again in case periodical task delays the 2381 * handling by calling hclgevf_task_schedule() in 2382 * hclgevf_periodic_service_task() 2383 */ 2384 hclgevf_reset_service_task(hdev); 2385 hclgevf_mailbox_service_task(hdev); 2386 } 2387 2388 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 2389 { 2390 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); 2391 } 2392 2393 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 2394 u32 *clearval) 2395 { 2396 u32 val, cmdq_stat_reg, rst_ing_reg; 2397 2398 /* fetch the events from their corresponding regs */ 2399 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 2400 HCLGEVF_VECTOR0_CMDQ_STATE_REG); 2401 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 2402 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2403 dev_info(&hdev->pdev->dev, 2404 "receive reset interrupt 0x%x!\n", rst_ing_reg); 2405 set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 2406 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2407 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 2408 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 2409 hdev->rst_stats.vf_rst_cnt++; 2410 /* set up VF hardware reset status, its PF will clear 2411 * this status when PF has initialized done. 2412 */ 2413 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 2414 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 2415 val | HCLGEVF_VF_RST_ING_BIT); 2416 return HCLGEVF_VECTOR0_EVENT_RST; 2417 } 2418 2419 /* check for vector0 mailbox(=CMDQ RX) event source */ 2420 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 2421 /* for revision 0x21, clearing interrupt is writing bit 0 2422 * to the clear register, writing bit 1 means to keep the 2423 * old value. 2424 * for revision 0x20, the clear register is a read & write 2425 * register, so we should just write 0 to the bit we are 2426 * handling, and keep other bits as cmdq_stat_reg. 2427 */ 2428 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2429 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2430 else 2431 *clearval = cmdq_stat_reg & 2432 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2433 2434 return HCLGEVF_VECTOR0_EVENT_MBX; 2435 } 2436 2437 /* print other vector0 event source */ 2438 dev_info(&hdev->pdev->dev, 2439 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2440 cmdq_stat_reg); 2441 2442 return HCLGEVF_VECTOR0_EVENT_OTHER; 2443 } 2444 2445 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2446 { 2447 enum hclgevf_evt_cause event_cause; 2448 struct hclgevf_dev *hdev = data; 2449 u32 clearval; 2450 2451 hclgevf_enable_vector(&hdev->misc_vector, false); 2452 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2453 2454 switch (event_cause) { 2455 case HCLGEVF_VECTOR0_EVENT_RST: 2456 hclgevf_reset_task_schedule(hdev); 2457 break; 2458 case HCLGEVF_VECTOR0_EVENT_MBX: 2459 hclgevf_mbx_handler(hdev); 2460 break; 2461 default: 2462 break; 2463 } 2464 2465 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { 2466 hclgevf_clear_event_cause(hdev, clearval); 2467 hclgevf_enable_vector(&hdev->misc_vector, true); 2468 } 2469 2470 return IRQ_HANDLED; 2471 } 2472 2473 static int hclgevf_configure(struct hclgevf_dev *hdev) 2474 { 2475 int ret; 2476 2477 ret = hclgevf_get_basic_info(hdev); 2478 if (ret) 2479 return ret; 2480 2481 /* get current port based vlan state from PF */ 2482 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2483 if (ret) 2484 return ret; 2485 2486 /* get queue configuration from PF */ 2487 ret = hclgevf_get_queue_info(hdev); 2488 if (ret) 2489 return ret; 2490 2491 /* get queue depth info from PF */ 2492 ret = hclgevf_get_queue_depth(hdev); 2493 if (ret) 2494 return ret; 2495 2496 return hclgevf_get_pf_media_type(hdev); 2497 } 2498 2499 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2500 { 2501 struct pci_dev *pdev = ae_dev->pdev; 2502 struct hclgevf_dev *hdev; 2503 2504 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2505 if (!hdev) 2506 return -ENOMEM; 2507 2508 hdev->pdev = pdev; 2509 hdev->ae_dev = ae_dev; 2510 ae_dev->priv = hdev; 2511 2512 return 0; 2513 } 2514 2515 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2516 { 2517 struct hnae3_handle *roce = &hdev->roce; 2518 struct hnae3_handle *nic = &hdev->nic; 2519 2520 roce->rinfo.num_vectors = hdev->num_roce_msix; 2521 2522 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2523 hdev->num_msi_left == 0) 2524 return -EINVAL; 2525 2526 roce->rinfo.base_vector = hdev->roce_base_vector; 2527 2528 roce->rinfo.netdev = nic->kinfo.netdev; 2529 roce->rinfo.roce_io_base = hdev->hw.io_base; 2530 roce->rinfo.roce_mem_base = hdev->hw.mem_base; 2531 2532 roce->pdev = nic->pdev; 2533 roce->ae_algo = nic->ae_algo; 2534 roce->numa_node_mask = nic->numa_node_mask; 2535 2536 return 0; 2537 } 2538 2539 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) 2540 { 2541 struct hclgevf_cfg_gro_status_cmd *req; 2542 struct hclgevf_desc desc; 2543 int ret; 2544 2545 if (!hnae3_dev_gro_supported(hdev)) 2546 return 0; 2547 2548 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, 2549 false); 2550 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2551 2552 req->gro_en = en ? 1 : 0; 2553 2554 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2555 if (ret) 2556 dev_err(&hdev->pdev->dev, 2557 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2558 2559 return ret; 2560 } 2561 2562 static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) 2563 { 2564 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size; 2565 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2566 struct hclgevf_rss_tuple_cfg *tuple_sets; 2567 u32 i; 2568 2569 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 2570 rss_cfg->rss_size = hdev->nic.kinfo.rss_size; 2571 tuple_sets = &rss_cfg->rss_tuple_sets; 2572 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2573 u8 *rss_ind_tbl; 2574 2575 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 2576 2577 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size, 2578 sizeof(*rss_ind_tbl), GFP_KERNEL); 2579 if (!rss_ind_tbl) 2580 return -ENOMEM; 2581 2582 rss_cfg->rss_indirection_tbl = rss_ind_tbl; 2583 memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, 2584 HCLGEVF_RSS_KEY_SIZE); 2585 2586 tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2587 tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2588 tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2589 tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2590 tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2591 tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2592 tuple_sets->ipv6_sctp_en = 2593 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 2594 HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT : 2595 HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2596 tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2597 } 2598 2599 /* Initialize RSS indirect table */ 2600 for (i = 0; i < rss_ind_tbl_size; i++) 2601 rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; 2602 2603 return 0; 2604 } 2605 2606 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2607 { 2608 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 2609 int ret; 2610 2611 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2612 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 2613 rss_cfg->rss_hash_key); 2614 if (ret) 2615 return ret; 2616 2617 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); 2618 if (ret) 2619 return ret; 2620 } 2621 2622 ret = hclgevf_set_rss_indir_table(hdev); 2623 if (ret) 2624 return ret; 2625 2626 return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); 2627 } 2628 2629 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2630 { 2631 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2632 false); 2633 } 2634 2635 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2636 { 2637 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2638 2639 unsigned long last = hdev->serv_processed_cnt; 2640 int i = 0; 2641 2642 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2643 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2644 last == hdev->serv_processed_cnt) 2645 usleep_range(1, 1); 2646 } 2647 2648 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2649 { 2650 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2651 2652 if (enable) { 2653 hclgevf_task_schedule(hdev, 0); 2654 } else { 2655 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2656 2657 /* flush memory to make sure DOWN is seen by service task */ 2658 smp_mb__before_atomic(); 2659 hclgevf_flush_link_update(hdev); 2660 } 2661 } 2662 2663 static int hclgevf_ae_start(struct hnae3_handle *handle) 2664 { 2665 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2666 2667 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2668 clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); 2669 2670 hclgevf_reset_tqp_stats(handle); 2671 2672 hclgevf_request_link_info(hdev); 2673 2674 hclgevf_update_link_mode(hdev); 2675 2676 return 0; 2677 } 2678 2679 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2680 { 2681 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2682 2683 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2684 2685 if (hdev->reset_type != HNAE3_VF_RESET) 2686 hclgevf_reset_tqp(handle); 2687 2688 hclgevf_reset_tqp_stats(handle); 2689 hclgevf_update_link_status(hdev, 0); 2690 } 2691 2692 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2693 { 2694 #define HCLGEVF_STATE_ALIVE 1 2695 #define HCLGEVF_STATE_NOT_ALIVE 0 2696 2697 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2698 struct hclge_vf_to_pf_msg send_msg; 2699 2700 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2701 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2702 HCLGEVF_STATE_NOT_ALIVE; 2703 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2704 } 2705 2706 static int hclgevf_client_start(struct hnae3_handle *handle) 2707 { 2708 return hclgevf_set_alive(handle, true); 2709 } 2710 2711 static void hclgevf_client_stop(struct hnae3_handle *handle) 2712 { 2713 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2714 int ret; 2715 2716 ret = hclgevf_set_alive(handle, false); 2717 if (ret) 2718 dev_warn(&hdev->pdev->dev, 2719 "%s failed %d\n", __func__, ret); 2720 } 2721 2722 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2723 { 2724 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2725 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2726 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2727 2728 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2729 2730 mutex_init(&hdev->mbx_resp.mbx_mutex); 2731 sema_init(&hdev->reset_sem, 1); 2732 2733 spin_lock_init(&hdev->mac_table.mac_list_lock); 2734 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2735 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2736 2737 /* bring the device down */ 2738 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2739 } 2740 2741 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2742 { 2743 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2744 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2745 2746 if (hdev->service_task.work.func) 2747 cancel_delayed_work_sync(&hdev->service_task); 2748 2749 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2750 } 2751 2752 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2753 { 2754 struct pci_dev *pdev = hdev->pdev; 2755 int vectors; 2756 int i; 2757 2758 if (hnae3_dev_roce_supported(hdev)) 2759 vectors = pci_alloc_irq_vectors(pdev, 2760 hdev->roce_base_msix_offset + 1, 2761 hdev->num_msi, 2762 PCI_IRQ_MSIX); 2763 else 2764 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2765 hdev->num_msi, 2766 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2767 2768 if (vectors < 0) { 2769 dev_err(&pdev->dev, 2770 "failed(%d) to allocate MSI/MSI-X vectors\n", 2771 vectors); 2772 return vectors; 2773 } 2774 if (vectors < hdev->num_msi) 2775 dev_warn(&hdev->pdev->dev, 2776 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2777 hdev->num_msi, vectors); 2778 2779 hdev->num_msi = vectors; 2780 hdev->num_msi_left = vectors; 2781 2782 hdev->base_msi_vector = pdev->irq; 2783 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; 2784 2785 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2786 sizeof(u16), GFP_KERNEL); 2787 if (!hdev->vector_status) { 2788 pci_free_irq_vectors(pdev); 2789 return -ENOMEM; 2790 } 2791 2792 for (i = 0; i < hdev->num_msi; i++) 2793 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2794 2795 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2796 sizeof(int), GFP_KERNEL); 2797 if (!hdev->vector_irq) { 2798 devm_kfree(&pdev->dev, hdev->vector_status); 2799 pci_free_irq_vectors(pdev); 2800 return -ENOMEM; 2801 } 2802 2803 return 0; 2804 } 2805 2806 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2807 { 2808 struct pci_dev *pdev = hdev->pdev; 2809 2810 devm_kfree(&pdev->dev, hdev->vector_status); 2811 devm_kfree(&pdev->dev, hdev->vector_irq); 2812 pci_free_irq_vectors(pdev); 2813 } 2814 2815 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2816 { 2817 int ret; 2818 2819 hclgevf_get_misc_vector(hdev); 2820 2821 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2822 HCLGEVF_NAME, pci_name(hdev->pdev)); 2823 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2824 0, hdev->misc_vector.name, hdev); 2825 if (ret) { 2826 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2827 hdev->misc_vector.vector_irq); 2828 return ret; 2829 } 2830 2831 hclgevf_clear_event_cause(hdev, 0); 2832 2833 /* enable misc. vector(vector 0) */ 2834 hclgevf_enable_vector(&hdev->misc_vector, true); 2835 2836 return ret; 2837 } 2838 2839 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2840 { 2841 /* disable misc vector(vector 0) */ 2842 hclgevf_enable_vector(&hdev->misc_vector, false); 2843 synchronize_irq(hdev->misc_vector.vector_irq); 2844 free_irq(hdev->misc_vector.vector_irq, hdev); 2845 hclgevf_free_vector(hdev, 0); 2846 } 2847 2848 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2849 { 2850 struct device *dev = &hdev->pdev->dev; 2851 2852 dev_info(dev, "VF info begin:\n"); 2853 2854 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2855 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2856 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2857 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2858 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2859 dev_info(dev, "PF media type of this VF: %u\n", 2860 hdev->hw.mac.media_type); 2861 2862 dev_info(dev, "VF info end.\n"); 2863 } 2864 2865 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2866 struct hnae3_client *client) 2867 { 2868 struct hclgevf_dev *hdev = ae_dev->priv; 2869 int rst_cnt = hdev->rst_stats.rst_cnt; 2870 int ret; 2871 2872 ret = client->ops->init_instance(&hdev->nic); 2873 if (ret) 2874 return ret; 2875 2876 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2877 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2878 rst_cnt != hdev->rst_stats.rst_cnt) { 2879 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2880 2881 client->ops->uninit_instance(&hdev->nic, 0); 2882 return -EBUSY; 2883 } 2884 2885 hnae3_set_client_init_flag(client, ae_dev, 1); 2886 2887 if (netif_msg_drv(&hdev->nic)) 2888 hclgevf_info_show(hdev); 2889 2890 return 0; 2891 } 2892 2893 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2894 struct hnae3_client *client) 2895 { 2896 struct hclgevf_dev *hdev = ae_dev->priv; 2897 int ret; 2898 2899 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2900 !hdev->nic_client) 2901 return 0; 2902 2903 ret = hclgevf_init_roce_base_info(hdev); 2904 if (ret) 2905 return ret; 2906 2907 ret = client->ops->init_instance(&hdev->roce); 2908 if (ret) 2909 return ret; 2910 2911 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2912 hnae3_set_client_init_flag(client, ae_dev, 1); 2913 2914 return 0; 2915 } 2916 2917 static int hclgevf_init_client_instance(struct hnae3_client *client, 2918 struct hnae3_ae_dev *ae_dev) 2919 { 2920 struct hclgevf_dev *hdev = ae_dev->priv; 2921 int ret; 2922 2923 switch (client->type) { 2924 case HNAE3_CLIENT_KNIC: 2925 hdev->nic_client = client; 2926 hdev->nic.client = client; 2927 2928 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2929 if (ret) 2930 goto clear_nic; 2931 2932 ret = hclgevf_init_roce_client_instance(ae_dev, 2933 hdev->roce_client); 2934 if (ret) 2935 goto clear_roce; 2936 2937 break; 2938 case HNAE3_CLIENT_ROCE: 2939 if (hnae3_dev_roce_supported(hdev)) { 2940 hdev->roce_client = client; 2941 hdev->roce.client = client; 2942 } 2943 2944 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2945 if (ret) 2946 goto clear_roce; 2947 2948 break; 2949 default: 2950 return -EINVAL; 2951 } 2952 2953 return 0; 2954 2955 clear_nic: 2956 hdev->nic_client = NULL; 2957 hdev->nic.client = NULL; 2958 return ret; 2959 clear_roce: 2960 hdev->roce_client = NULL; 2961 hdev->roce.client = NULL; 2962 return ret; 2963 } 2964 2965 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2966 struct hnae3_ae_dev *ae_dev) 2967 { 2968 struct hclgevf_dev *hdev = ae_dev->priv; 2969 2970 /* un-init roce, if it exists */ 2971 if (hdev->roce_client) { 2972 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2973 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2974 hdev->roce_client = NULL; 2975 hdev->roce.client = NULL; 2976 } 2977 2978 /* un-init nic/unic, if this was not called by roce client */ 2979 if (client->ops->uninit_instance && hdev->nic_client && 2980 client->type != HNAE3_CLIENT_ROCE) { 2981 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2982 2983 client->ops->uninit_instance(&hdev->nic, 0); 2984 hdev->nic_client = NULL; 2985 hdev->nic.client = NULL; 2986 } 2987 } 2988 2989 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) 2990 { 2991 #define HCLGEVF_MEM_BAR 4 2992 2993 struct pci_dev *pdev = hdev->pdev; 2994 struct hclgevf_hw *hw = &hdev->hw; 2995 2996 /* for device does not have device memory, return directly */ 2997 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) 2998 return 0; 2999 3000 hw->mem_base = devm_ioremap_wc(&pdev->dev, 3001 pci_resource_start(pdev, 3002 HCLGEVF_MEM_BAR), 3003 pci_resource_len(pdev, HCLGEVF_MEM_BAR)); 3004 if (!hw->mem_base) { 3005 dev_err(&pdev->dev, "failed to map device memory\n"); 3006 return -EFAULT; 3007 } 3008 3009 return 0; 3010 } 3011 3012 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 3013 { 3014 struct pci_dev *pdev = hdev->pdev; 3015 struct hclgevf_hw *hw; 3016 int ret; 3017 3018 ret = pci_enable_device(pdev); 3019 if (ret) { 3020 dev_err(&pdev->dev, "failed to enable PCI device\n"); 3021 return ret; 3022 } 3023 3024 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3025 if (ret) { 3026 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 3027 goto err_disable_device; 3028 } 3029 3030 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 3031 if (ret) { 3032 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 3033 goto err_disable_device; 3034 } 3035 3036 pci_set_master(pdev); 3037 hw = &hdev->hw; 3038 hw->hdev = hdev; 3039 hw->io_base = pci_iomap(pdev, 2, 0); 3040 if (!hw->io_base) { 3041 dev_err(&pdev->dev, "can't map configuration register space\n"); 3042 ret = -ENOMEM; 3043 goto err_clr_master; 3044 } 3045 3046 ret = hclgevf_dev_mem_map(hdev); 3047 if (ret) 3048 goto err_unmap_io_base; 3049 3050 return 0; 3051 3052 err_unmap_io_base: 3053 pci_iounmap(pdev, hdev->hw.io_base); 3054 err_clr_master: 3055 pci_clear_master(pdev); 3056 pci_release_regions(pdev); 3057 err_disable_device: 3058 pci_disable_device(pdev); 3059 3060 return ret; 3061 } 3062 3063 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 3064 { 3065 struct pci_dev *pdev = hdev->pdev; 3066 3067 if (hdev->hw.mem_base) 3068 devm_iounmap(&pdev->dev, hdev->hw.mem_base); 3069 3070 pci_iounmap(pdev, hdev->hw.io_base); 3071 pci_clear_master(pdev); 3072 pci_release_regions(pdev); 3073 pci_disable_device(pdev); 3074 } 3075 3076 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 3077 { 3078 struct hclgevf_query_res_cmd *req; 3079 struct hclgevf_desc desc; 3080 int ret; 3081 3082 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); 3083 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 3084 if (ret) { 3085 dev_err(&hdev->pdev->dev, 3086 "query vf resource failed, ret = %d.\n", ret); 3087 return ret; 3088 } 3089 3090 req = (struct hclgevf_query_res_cmd *)desc.data; 3091 3092 if (hnae3_dev_roce_supported(hdev)) { 3093 hdev->roce_base_msix_offset = 3094 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 3095 HCLGEVF_MSIX_OFT_ROCEE_M, 3096 HCLGEVF_MSIX_OFT_ROCEE_S); 3097 hdev->num_roce_msix = 3098 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3099 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3100 3101 /* nic's msix numbers is always equals to the roce's. */ 3102 hdev->num_nic_msix = hdev->num_roce_msix; 3103 3104 /* VF should have NIC vectors and Roce vectors, NIC vectors 3105 * are queued before Roce vectors. The offset is fixed to 64. 3106 */ 3107 hdev->num_msi = hdev->num_roce_msix + 3108 hdev->roce_base_msix_offset; 3109 } else { 3110 hdev->num_msi = 3111 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 3112 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 3113 3114 hdev->num_nic_msix = hdev->num_msi; 3115 } 3116 3117 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 3118 dev_err(&hdev->pdev->dev, 3119 "Just %u msi resources, not enough for vf(min:2).\n", 3120 hdev->num_nic_msix); 3121 return -EINVAL; 3122 } 3123 3124 return 0; 3125 } 3126 3127 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 3128 { 3129 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 3130 3131 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3132 3133 ae_dev->dev_specs.max_non_tso_bd_num = 3134 HCLGEVF_MAX_NON_TSO_BD_NUM; 3135 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3136 ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3137 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3138 ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3139 } 3140 3141 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 3142 struct hclgevf_desc *desc) 3143 { 3144 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3145 struct hclgevf_dev_specs_0_cmd *req0; 3146 struct hclgevf_dev_specs_1_cmd *req1; 3147 3148 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 3149 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 3150 3151 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 3152 ae_dev->dev_specs.rss_ind_tbl_size = 3153 le16_to_cpu(req0->rss_ind_tbl_size); 3154 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 3155 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 3156 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 3157 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 3158 } 3159 3160 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 3161 { 3162 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 3163 3164 if (!dev_specs->max_non_tso_bd_num) 3165 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 3166 if (!dev_specs->rss_ind_tbl_size) 3167 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3168 if (!dev_specs->rss_key_size) 3169 dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3170 if (!dev_specs->max_int_gl) 3171 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3172 if (!dev_specs->max_frm_size) 3173 dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; 3174 } 3175 3176 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 3177 { 3178 struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 3179 int ret; 3180 int i; 3181 3182 /* set default specifications as devices lower than version V3 do not 3183 * support querying specifications from firmware. 3184 */ 3185 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 3186 hclgevf_set_default_dev_specs(hdev); 3187 return 0; 3188 } 3189 3190 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 3191 hclgevf_cmd_setup_basic_desc(&desc[i], 3192 HCLGEVF_OPC_QUERY_DEV_SPECS, true); 3193 desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT); 3194 } 3195 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, 3196 true); 3197 3198 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 3199 if (ret) 3200 return ret; 3201 3202 hclgevf_parse_dev_specs(hdev, desc); 3203 hclgevf_check_dev_specs(hdev); 3204 3205 return 0; 3206 } 3207 3208 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 3209 { 3210 struct pci_dev *pdev = hdev->pdev; 3211 int ret = 0; 3212 3213 if (hdev->reset_type == HNAE3_VF_FULL_RESET && 3214 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3215 hclgevf_misc_irq_uninit(hdev); 3216 hclgevf_uninit_msi(hdev); 3217 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3218 } 3219 3220 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3221 pci_set_master(pdev); 3222 ret = hclgevf_init_msi(hdev); 3223 if (ret) { 3224 dev_err(&pdev->dev, 3225 "failed(%d) to init MSI/MSI-X\n", ret); 3226 return ret; 3227 } 3228 3229 ret = hclgevf_misc_irq_init(hdev); 3230 if (ret) { 3231 hclgevf_uninit_msi(hdev); 3232 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 3233 ret); 3234 return ret; 3235 } 3236 3237 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3238 } 3239 3240 return ret; 3241 } 3242 3243 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 3244 { 3245 struct hclge_vf_to_pf_msg send_msg; 3246 3247 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 3248 HCLGE_MBX_VPORT_LIST_CLEAR); 3249 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3250 } 3251 3252 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) 3253 { 3254 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 3255 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); 3256 } 3257 3258 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) 3259 { 3260 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 3261 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); 3262 } 3263 3264 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 3265 { 3266 struct pci_dev *pdev = hdev->pdev; 3267 int ret; 3268 3269 ret = hclgevf_pci_reset(hdev); 3270 if (ret) { 3271 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 3272 return ret; 3273 } 3274 3275 ret = hclgevf_cmd_init(hdev); 3276 if (ret) { 3277 dev_err(&pdev->dev, "cmd failed %d\n", ret); 3278 return ret; 3279 } 3280 3281 ret = hclgevf_rss_init_hw(hdev); 3282 if (ret) { 3283 dev_err(&hdev->pdev->dev, 3284 "failed(%d) to initialize RSS\n", ret); 3285 return ret; 3286 } 3287 3288 ret = hclgevf_config_gro(hdev, true); 3289 if (ret) 3290 return ret; 3291 3292 ret = hclgevf_init_vlan_config(hdev); 3293 if (ret) { 3294 dev_err(&hdev->pdev->dev, 3295 "failed(%d) to initialize VLAN config\n", ret); 3296 return ret; 3297 } 3298 3299 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 3300 3301 hclgevf_init_rxd_adv_layout(hdev); 3302 3303 dev_info(&hdev->pdev->dev, "Reset done\n"); 3304 3305 return 0; 3306 } 3307 3308 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 3309 { 3310 struct pci_dev *pdev = hdev->pdev; 3311 int ret; 3312 3313 ret = hclgevf_pci_init(hdev); 3314 if (ret) 3315 return ret; 3316 3317 ret = hclgevf_cmd_queue_init(hdev); 3318 if (ret) 3319 goto err_cmd_queue_init; 3320 3321 ret = hclgevf_cmd_init(hdev); 3322 if (ret) 3323 goto err_cmd_init; 3324 3325 /* Get vf resource */ 3326 ret = hclgevf_query_vf_resource(hdev); 3327 if (ret) 3328 goto err_cmd_init; 3329 3330 ret = hclgevf_query_dev_specs(hdev); 3331 if (ret) { 3332 dev_err(&pdev->dev, 3333 "failed to query dev specifications, ret = %d\n", ret); 3334 goto err_cmd_init; 3335 } 3336 3337 ret = hclgevf_init_msi(hdev); 3338 if (ret) { 3339 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 3340 goto err_cmd_init; 3341 } 3342 3343 hclgevf_state_init(hdev); 3344 hdev->reset_level = HNAE3_VF_FUNC_RESET; 3345 hdev->reset_type = HNAE3_NONE_RESET; 3346 3347 ret = hclgevf_misc_irq_init(hdev); 3348 if (ret) 3349 goto err_misc_irq_init; 3350 3351 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3352 3353 ret = hclgevf_configure(hdev); 3354 if (ret) { 3355 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 3356 goto err_config; 3357 } 3358 3359 ret = hclgevf_alloc_tqps(hdev); 3360 if (ret) { 3361 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 3362 goto err_config; 3363 } 3364 3365 ret = hclgevf_set_handle_info(hdev); 3366 if (ret) 3367 goto err_config; 3368 3369 ret = hclgevf_config_gro(hdev, true); 3370 if (ret) 3371 goto err_config; 3372 3373 /* Initialize RSS for this VF */ 3374 ret = hclgevf_rss_init_cfg(hdev); 3375 if (ret) { 3376 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 3377 goto err_config; 3378 } 3379 3380 ret = hclgevf_rss_init_hw(hdev); 3381 if (ret) { 3382 dev_err(&hdev->pdev->dev, 3383 "failed(%d) to initialize RSS\n", ret); 3384 goto err_config; 3385 } 3386 3387 /* ensure vf tbl list as empty before init*/ 3388 ret = hclgevf_clear_vport_list(hdev); 3389 if (ret) { 3390 dev_err(&pdev->dev, 3391 "failed to clear tbl list configuration, ret = %d.\n", 3392 ret); 3393 goto err_config; 3394 } 3395 3396 ret = hclgevf_init_vlan_config(hdev); 3397 if (ret) { 3398 dev_err(&hdev->pdev->dev, 3399 "failed(%d) to initialize VLAN config\n", ret); 3400 goto err_config; 3401 } 3402 3403 hclgevf_init_rxd_adv_layout(hdev); 3404 3405 hdev->last_reset_time = jiffies; 3406 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 3407 HCLGEVF_DRIVER_NAME); 3408 3409 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 3410 3411 return 0; 3412 3413 err_config: 3414 hclgevf_misc_irq_uninit(hdev); 3415 err_misc_irq_init: 3416 hclgevf_state_uninit(hdev); 3417 hclgevf_uninit_msi(hdev); 3418 err_cmd_init: 3419 hclgevf_cmd_uninit(hdev); 3420 err_cmd_queue_init: 3421 hclgevf_pci_uninit(hdev); 3422 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3423 return ret; 3424 } 3425 3426 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 3427 { 3428 struct hclge_vf_to_pf_msg send_msg; 3429 3430 hclgevf_state_uninit(hdev); 3431 hclgevf_uninit_rxd_adv_layout(hdev); 3432 3433 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3434 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3435 3436 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3437 hclgevf_misc_irq_uninit(hdev); 3438 hclgevf_uninit_msi(hdev); 3439 } 3440 3441 hclgevf_cmd_uninit(hdev); 3442 hclgevf_pci_uninit(hdev); 3443 hclgevf_uninit_mac_list(hdev); 3444 } 3445 3446 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 3447 { 3448 struct pci_dev *pdev = ae_dev->pdev; 3449 int ret; 3450 3451 ret = hclgevf_alloc_hdev(ae_dev); 3452 if (ret) { 3453 dev_err(&pdev->dev, "hclge device allocation failed\n"); 3454 return ret; 3455 } 3456 3457 ret = hclgevf_init_hdev(ae_dev->priv); 3458 if (ret) { 3459 dev_err(&pdev->dev, "hclge device initialization failed\n"); 3460 return ret; 3461 } 3462 3463 return 0; 3464 } 3465 3466 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3467 { 3468 struct hclgevf_dev *hdev = ae_dev->priv; 3469 3470 hclgevf_uninit_hdev(hdev); 3471 ae_dev->priv = NULL; 3472 } 3473 3474 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3475 { 3476 struct hnae3_handle *nic = &hdev->nic; 3477 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3478 3479 return min_t(u32, hdev->rss_size_max, 3480 hdev->num_tqps / kinfo->tc_info.num_tc); 3481 } 3482 3483 /** 3484 * hclgevf_get_channels - Get the current channels enabled and max supported. 3485 * @handle: hardware information for network interface 3486 * @ch: ethtool channels structure 3487 * 3488 * We don't support separate tx and rx queues as channels. The other count 3489 * represents how many queues are being used for control. max_combined counts 3490 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3491 * q_vectors since we support a lot more queue pairs than q_vectors. 3492 **/ 3493 static void hclgevf_get_channels(struct hnae3_handle *handle, 3494 struct ethtool_channels *ch) 3495 { 3496 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3497 3498 ch->max_combined = hclgevf_get_max_channels(hdev); 3499 ch->other_count = 0; 3500 ch->max_other = 0; 3501 ch->combined_count = handle->kinfo.rss_size; 3502 } 3503 3504 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3505 u16 *alloc_tqps, u16 *max_rss_size) 3506 { 3507 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3508 3509 *alloc_tqps = hdev->num_tqps; 3510 *max_rss_size = hdev->rss_size_max; 3511 } 3512 3513 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3514 u32 new_tqps_num) 3515 { 3516 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3517 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3518 u16 max_rss_size; 3519 3520 kinfo->req_rss_size = new_tqps_num; 3521 3522 max_rss_size = min_t(u16, hdev->rss_size_max, 3523 hdev->num_tqps / kinfo->tc_info.num_tc); 3524 3525 /* Use the user's configuration when it is not larger than 3526 * max_rss_size, otherwise, use the maximum specification value. 3527 */ 3528 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 3529 kinfo->req_rss_size <= max_rss_size) 3530 kinfo->rss_size = kinfo->req_rss_size; 3531 else if (kinfo->rss_size > max_rss_size || 3532 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3533 kinfo->rss_size = max_rss_size; 3534 3535 kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 3536 } 3537 3538 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 3539 bool rxfh_configured) 3540 { 3541 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3542 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3543 u16 cur_rss_size = kinfo->rss_size; 3544 u16 cur_tqps = kinfo->num_tqps; 3545 u32 *rss_indir; 3546 unsigned int i; 3547 int ret; 3548 3549 hclgevf_update_rss_size(handle, new_tqps_num); 3550 3551 ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); 3552 if (ret) 3553 return ret; 3554 3555 /* RSS indirection table has been configured by user */ 3556 if (rxfh_configured) 3557 goto out; 3558 3559 /* Reinitializes the rss indirect table according to the new RSS size */ 3560 rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 3561 sizeof(u32), GFP_KERNEL); 3562 if (!rss_indir) 3563 return -ENOMEM; 3564 3565 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 3566 rss_indir[i] = i % kinfo->rss_size; 3567 3568 hdev->rss_cfg.rss_size = kinfo->rss_size; 3569 3570 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 3571 if (ret) 3572 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 3573 ret); 3574 3575 kfree(rss_indir); 3576 3577 out: 3578 if (!ret) 3579 dev_info(&hdev->pdev->dev, 3580 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 3581 cur_rss_size, kinfo->rss_size, 3582 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 3583 3584 return ret; 3585 } 3586 3587 static int hclgevf_get_status(struct hnae3_handle *handle) 3588 { 3589 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3590 3591 return hdev->hw.mac.link; 3592 } 3593 3594 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3595 u8 *auto_neg, u32 *speed, 3596 u8 *duplex) 3597 { 3598 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3599 3600 if (speed) 3601 *speed = hdev->hw.mac.speed; 3602 if (duplex) 3603 *duplex = hdev->hw.mac.duplex; 3604 if (auto_neg) 3605 *auto_neg = AUTONEG_DISABLE; 3606 } 3607 3608 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3609 u8 duplex) 3610 { 3611 hdev->hw.mac.speed = speed; 3612 hdev->hw.mac.duplex = duplex; 3613 } 3614 3615 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3616 { 3617 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3618 3619 return hclgevf_config_gro(hdev, enable); 3620 } 3621 3622 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3623 u8 *module_type) 3624 { 3625 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3626 3627 if (media_type) 3628 *media_type = hdev->hw.mac.media_type; 3629 3630 if (module_type) 3631 *module_type = hdev->hw.mac.module_type; 3632 } 3633 3634 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3635 { 3636 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3637 3638 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3639 } 3640 3641 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3642 { 3643 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3644 3645 return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); 3646 } 3647 3648 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3649 { 3650 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3651 3652 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3653 } 3654 3655 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3656 { 3657 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3658 3659 return hdev->rst_stats.hw_rst_done_cnt; 3660 } 3661 3662 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3663 unsigned long *supported, 3664 unsigned long *advertising) 3665 { 3666 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3667 3668 *supported = hdev->hw.mac.supported; 3669 *advertising = hdev->hw.mac.advertising; 3670 } 3671 3672 #define MAX_SEPARATE_NUM 4 3673 #define SEPARATOR_VALUE 0xFDFCFBFA 3674 #define REG_NUM_PER_LINE 4 3675 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 3676 3677 static int hclgevf_get_regs_len(struct hnae3_handle *handle) 3678 { 3679 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 3680 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3681 3682 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 3683 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 3684 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 3685 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 3686 3687 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + 3688 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; 3689 } 3690 3691 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, 3692 void *data) 3693 { 3694 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3695 int i, j, reg_um, separator_num; 3696 u32 *reg = data; 3697 3698 *version = hdev->fw_version; 3699 3700 /* fetching per-VF registers values from VF PCIe register space */ 3701 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 3702 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3703 for (i = 0; i < reg_um; i++) 3704 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 3705 for (i = 0; i < separator_num; i++) 3706 *reg++ = SEPARATOR_VALUE; 3707 3708 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 3709 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3710 for (i = 0; i < reg_um; i++) 3711 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); 3712 for (i = 0; i < separator_num; i++) 3713 *reg++ = SEPARATOR_VALUE; 3714 3715 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 3716 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3717 for (j = 0; j < hdev->num_tqps; j++) { 3718 for (i = 0; i < reg_um; i++) 3719 *reg++ = hclgevf_read_dev(&hdev->hw, 3720 ring_reg_addr_list[i] + 3721 0x200 * j); 3722 for (i = 0; i < separator_num; i++) 3723 *reg++ = SEPARATOR_VALUE; 3724 } 3725 3726 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 3727 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 3728 for (j = 0; j < hdev->num_msi_used - 1; j++) { 3729 for (i = 0; i < reg_um; i++) 3730 *reg++ = hclgevf_read_dev(&hdev->hw, 3731 tqp_intr_reg_addr_list[i] + 3732 4 * j); 3733 for (i = 0; i < separator_num; i++) 3734 *reg++ = SEPARATOR_VALUE; 3735 } 3736 } 3737 3738 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 3739 u8 *port_base_vlan_info, u8 data_size) 3740 { 3741 struct hnae3_handle *nic = &hdev->nic; 3742 struct hclge_vf_to_pf_msg send_msg; 3743 int ret; 3744 3745 rtnl_lock(); 3746 3747 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 3748 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { 3749 dev_warn(&hdev->pdev->dev, 3750 "is resetting when updating port based vlan info\n"); 3751 rtnl_unlock(); 3752 return; 3753 } 3754 3755 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3756 if (ret) { 3757 rtnl_unlock(); 3758 return; 3759 } 3760 3761 /* send msg to PF and wait update port based vlan info */ 3762 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3763 HCLGE_MBX_PORT_BASE_VLAN_CFG); 3764 memcpy(send_msg.data, port_base_vlan_info, data_size); 3765 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3766 if (!ret) { 3767 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3768 nic->port_base_vlan_state = state; 3769 else 3770 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3771 } 3772 3773 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 3774 rtnl_unlock(); 3775 } 3776 3777 static const struct hnae3_ae_ops hclgevf_ops = { 3778 .init_ae_dev = hclgevf_init_ae_dev, 3779 .uninit_ae_dev = hclgevf_uninit_ae_dev, 3780 .reset_prepare = hclgevf_reset_prepare_general, 3781 .reset_done = hclgevf_reset_done, 3782 .init_client_instance = hclgevf_init_client_instance, 3783 .uninit_client_instance = hclgevf_uninit_client_instance, 3784 .start = hclgevf_ae_start, 3785 .stop = hclgevf_ae_stop, 3786 .client_start = hclgevf_client_start, 3787 .client_stop = hclgevf_client_stop, 3788 .map_ring_to_vector = hclgevf_map_ring_to_vector, 3789 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3790 .get_vector = hclgevf_get_vector, 3791 .put_vector = hclgevf_put_vector, 3792 .reset_queue = hclgevf_reset_tqp, 3793 .get_mac_addr = hclgevf_get_mac_addr, 3794 .set_mac_addr = hclgevf_set_mac_addr, 3795 .add_uc_addr = hclgevf_add_uc_addr, 3796 .rm_uc_addr = hclgevf_rm_uc_addr, 3797 .add_mc_addr = hclgevf_add_mc_addr, 3798 .rm_mc_addr = hclgevf_rm_mc_addr, 3799 .get_stats = hclgevf_get_stats, 3800 .update_stats = hclgevf_update_stats, 3801 .get_strings = hclgevf_get_strings, 3802 .get_sset_count = hclgevf_get_sset_count, 3803 .get_rss_key_size = hclgevf_get_rss_key_size, 3804 .get_rss = hclgevf_get_rss, 3805 .set_rss = hclgevf_set_rss, 3806 .get_rss_tuple = hclgevf_get_rss_tuple, 3807 .set_rss_tuple = hclgevf_set_rss_tuple, 3808 .get_tc_size = hclgevf_get_tc_size, 3809 .get_fw_version = hclgevf_get_fw_version, 3810 .set_vlan_filter = hclgevf_set_vlan_filter, 3811 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3812 .reset_event = hclgevf_reset_event, 3813 .set_default_reset_request = hclgevf_set_def_reset_request, 3814 .set_channels = hclgevf_set_channels, 3815 .get_channels = hclgevf_get_channels, 3816 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3817 .get_regs_len = hclgevf_get_regs_len, 3818 .get_regs = hclgevf_get_regs, 3819 .get_status = hclgevf_get_status, 3820 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3821 .get_media_type = hclgevf_get_media_type, 3822 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 3823 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3824 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 3825 .set_gro_en = hclgevf_gro_en, 3826 .set_mtu = hclgevf_set_mtu, 3827 .get_global_queue_id = hclgevf_get_qid_global, 3828 .set_timer_task = hclgevf_set_timer_task, 3829 .get_link_mode = hclgevf_get_link_mode, 3830 .set_promisc_mode = hclgevf_set_promisc_mode, 3831 .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3832 .get_cmdq_stat = hclgevf_get_cmdq_stat, 3833 }; 3834 3835 static struct hnae3_ae_algo ae_algovf = { 3836 .ops = &hclgevf_ops, 3837 .pdev_id_table = ae_algovf_pci_tbl, 3838 }; 3839 3840 static int hclgevf_init(void) 3841 { 3842 pr_info("%s is initializing\n", HCLGEVF_NAME); 3843 3844 hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); 3845 if (!hclgevf_wq) { 3846 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3847 return -ENOMEM; 3848 } 3849 3850 hnae3_register_ae_algo(&ae_algovf); 3851 3852 return 0; 3853 } 3854 3855 static void hclgevf_exit(void) 3856 { 3857 hnae3_unregister_ae_algo(&ae_algovf); 3858 destroy_workqueue(hclgevf_wq); 3859 } 3860 module_init(hclgevf_init); 3861 module_exit(hclgevf_exit); 3862 3863 MODULE_LICENSE("GPL"); 3864 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3865 MODULE_DESCRIPTION("HCLGEVF Driver"); 3866 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3867