// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
					 HCLGE_COMM_CMDQ_INTR_EN_REG,
					 HCLGE_COMM_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it posts the
 * descriptors to the queue and cleans up the completed ones.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
	struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;

	spin_lock(&cmdq->crq.lock);
	/* initialize the pointers of async rx queue of mailbox */
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	spin_unlock(&cmdq->crq.lock);
}

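/* The NIC handle and the RoCE handle are embedded at different offsets
 * inside struct hclgevf_dev, so resolve the owning device based on the
 * type of the client bound to the handle.
 */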
static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclge_comm_tqps_get_sset_count(handle);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclge_comm_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclge_comm_tqps_get_stats(handle, data);
}

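/* Zero-initialize a VF-to-PF mailbox message and fill in its opcode and
 * sub-opcode before it is handed to hclgevf_send_mbx_msg().
 */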
static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}

static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
	struct hclge_basic_info *basic_info;
	struct hclge_vf_to_pf_msg send_msg;
	unsigned long caps;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"failed to get basic info from pf, ret = %d", status);
		return status;
	}

	basic_info = (struct hclge_basic_info *)resp_msg;

	hdev->hw_tc_map = basic_info->hw_tc_map;
	hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
	caps = le32_to_cpu(basic_info->pf_caps);
	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6

	struct hclge_mbx_vf_queue_info *queue_info;
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
	hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
	hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
	hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4

	struct hclge_mbx_vf_queue_depth *queue_depth;
	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
	hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
	hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);

	return 0;
}

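/* Translate a VF-local queue id into the global queue id used by the PF by
 * asking the PF over the mailbox; returns 0 if the query fails.
 */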
static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	*(__le16 *)send_msg.data = cpu_to_le16(queue_id);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

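/* Allocate the per-queue (TQP) bookkeeping array and set up each queue's
 * register base; queues beyond HCLGEVF_TQP_MAX_SIZE_DEV_V2 live in an
 * extended register region, and TX push uses the device memory bar when
 * the hardware advertises that capability.
 */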
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_comm_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		/* when device supports tx push and has device memory,
		 * the queue can execute push mode or doorbell mode on
		 * device memory.
		 */
		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
			tqp->q.mem_base = hdev->hw.hw.mem_base +
					  HCLGEVF_TQP_MEM_OFFSET(hdev, i);

		tqp++;
	}

	return 0;
}

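/* Fill in the knic private info: descriptor numbers, rss_size and the tqp
 * pointer array, then clamp the queue count and rss_size to the MSI-X
 * vectors that are actually available.
 */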
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;
	u8 num_tc = 0;

	kinfo = &nic->kinfo;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			num_tc++;

	num_tc = num_tc ? num_tc : 1;
	kinfo->tc_info.num_tc = num_tc;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
	new_tqps = kinfo->rss_size * num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after initializing the max rss_size and tqps, adjust the default
	 * tqp number and rss size to the number of vectors actually
	 * available
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
				kinfo->rss_size);

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

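/* Propagate a link state change to the NIC client and, if present, the RoCE
 * client. The HCLGEVF_STATE_LINK_UPDATING bit serializes concurrent updates,
 * and a device marked DOWN always reports link down.
 */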
void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
	if (link_state != hdev->hw.mac.link) {
		hdev->hw.mac.link = link_state;
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

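/* Hand out up to vector_num unused MSI-X vectors to the caller, skipping
 * the reserved misc vector at the start of the table, and record their irq
 * numbers; returns how many vectors were actually allocated.
 */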
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGE_COMM_RSS_KEY_SIZE);
		}
	}

	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
				     hdev->ae_dev->dev_specs.rss_ind_tbl_size);

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
						  hfunc);
		if (ret)
			return ret;
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
					      rss_cfg->rss_indirection_tbl);
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
				       &hdev->rss_cfg, nfc);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 tuple_sets;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	nfc->data = 0;

	ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
				       &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

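/* Walk the ring chain and ask the PF to map (or unmap) each ring to the
 * given vector; parameters are batched into mailbox messages of at most
 * HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM entries.
 */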
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
					hnae3_get_field(node->int_gl_idx,
							HNAE3_RING_GL_IDX_M,
							HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

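/* Ask the PF to switch unicast/multicast/broadcast promiscuous mode for this
 * VF; the HNAE3_PFLAG_LIMIT_PROMISC private flag is forwarded so the PF
 * knows whether limited promiscuous mode is requested.
 */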
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;
	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
					     &handle->priv_flags) ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool en_bc_pmc;

	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

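/* Enable or disable a single TQP via the firmware command queue; the caller
 * loops over all queues in hclgevf_tqp_enable().
 */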
static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
				       u16 stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclge_desc desc;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

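/* Merge a new request state into an address that is already in the MAC list:
 * a TO_ADD request on a node pending deletion cancels the delete (ACTIVE);
 * a TO_DEL request on a node pending addition drops the node, otherwise it
 * marks the node TO_DEL; syncing ACTIVE over a pending addition marks the
 * address as programmed.
 */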
static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}

static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address is never added, unnecessary to delete */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			hnae3_format_mac_addr(format_mac_addr,
					      mac_node->mac_addr);
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %s, state = %d, ret = %d\n",
				format_mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, a TO_DEL request was received during the
		 * time window of sending the mac config request to the PF.
		 * If the mac_node state is ACTIVE, change its state to TO_DEL
		 * so it will be removed next time. If it is TO_ADD, the
		 * TO_ADD request failed, so just remove the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, a new
			 * TO_ADD request was received during the time window
			 * of sending the mac addr configure request to the PF,
			 * so just change the mac state to ACTIVE.
			 */
1132ee4bcd3bSJian Shen 			new_node->state = HCLGEVF_MAC_ACTIVE;
1133ee4bcd3bSJian Shen 			list_del(&mac_node->node);
1134ee4bcd3bSJian Shen 			kfree(mac_node);
1135ee4bcd3bSJian Shen 		} else {
113649768ce9SBaokun Li 			list_move_tail(&mac_node->node, mac_list);
1137ee4bcd3bSJian Shen 		}
1138ee4bcd3bSJian Shen 	}
1139ee4bcd3bSJian Shen }
1140ee4bcd3bSJian Shen 
1141ee4bcd3bSJian Shen static void hclgevf_clear_list(struct list_head *list)
1142ee4bcd3bSJian Shen {
1143ee4bcd3bSJian Shen 	struct hclgevf_mac_addr_node *mac_node, *tmp;
1144ee4bcd3bSJian Shen 
1145ee4bcd3bSJian Shen 	list_for_each_entry_safe(mac_node, tmp, list, node) {
1146ee4bcd3bSJian Shen 		list_del(&mac_node->node);
1147ee4bcd3bSJian Shen 		kfree(mac_node);
1148ee4bcd3bSJian Shen 	}
1149ee4bcd3bSJian Shen }
1150ee4bcd3bSJian Shen 
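/* Sync the unicast/multicast MAC list to hardware: under the list lock,
 * move TO_DEL nodes to a temporary delete list and copy TO_ADD nodes to a
 * temporary add list, issue the mailbox requests outside the lock, then
 * merge the results back into the main list.
 */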
1151ee4bcd3bSJian Shen static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
1152ee4bcd3bSJian Shen 				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
1153ee4bcd3bSJian Shen {
1154ee4bcd3bSJian Shen 	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
1155ee4bcd3bSJian Shen 	struct list_head tmp_add_list, tmp_del_list;
1156ee4bcd3bSJian Shen 	struct list_head *list;
1157ee4bcd3bSJian Shen 
1158ee4bcd3bSJian Shen 	INIT_LIST_HEAD(&tmp_add_list);
1159ee4bcd3bSJian Shen 	INIT_LIST_HEAD(&tmp_del_list);
1160ee4bcd3bSJian Shen 
1161ee4bcd3bSJian Shen 	/* move the mac addrs to the tmp_add_list and tmp_del_list, so that
1162ee4bcd3bSJian Shen 	 * they can be added/deleted outside the spin lock
1163ee4bcd3bSJian Shen 	 */
1164ee4bcd3bSJian Shen 	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
1165ee4bcd3bSJian Shen 		&hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
1166ee4bcd3bSJian Shen 
1167ee4bcd3bSJian Shen 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1168ee4bcd3bSJian Shen 
1169ee4bcd3bSJian Shen 	list_for_each_entry_safe(mac_node, tmp, list, node) {
1170ee4bcd3bSJian Shen 		switch (mac_node->state) {
1171ee4bcd3bSJian Shen 		case HCLGEVF_MAC_TO_DEL:
117249768ce9SBaokun Li 			list_move_tail(&mac_node->node, &tmp_del_list);
1173ee4bcd3bSJian Shen 			break;
1174ee4bcd3bSJian Shen 		case HCLGEVF_MAC_TO_ADD:
1175ee4bcd3bSJian Shen 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
1176ee4bcd3bSJian Shen 			if (!new_node)
1177ee4bcd3bSJian Shen 				goto stop_traverse;
1178ee4bcd3bSJian Shen 
1179ee4bcd3bSJian Shen 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
1180ee4bcd3bSJian Shen 			new_node->state = mac_node->state;
1181ee4bcd3bSJian Shen 			list_add_tail(&new_node->node, &tmp_add_list);
1182ee4bcd3bSJian Shen 			break;
1183ee4bcd3bSJian Shen 		default:
1184ee4bcd3bSJian Shen 			break;
1185ee4bcd3bSJian Shen 		}
1186ee4bcd3bSJian Shen 	}
1187ee4bcd3bSJian Shen 
1188ee4bcd3bSJian Shen stop_traverse:
1189ee4bcd3bSJian Shen 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1190ee4bcd3bSJian Shen 
1191ee4bcd3bSJian Shen 	/* delete first, in order to get max mac table space for adding */
1192ee4bcd3bSJian Shen 	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
1193ee4bcd3bSJian Shen 	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
1194ee4bcd3bSJian Shen 
1195ee4bcd3bSJian Shen 	/* if adding/deleting some mac addresses failed, move them back to
1196ee4bcd3bSJian Shen 	 * the mac_list and retry next time.
1197ee4bcd3bSJian Shen 	 */
1198ee4bcd3bSJian Shen 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1199ee4bcd3bSJian Shen 
1200ee4bcd3bSJian Shen 	hclgevf_sync_from_del_list(&tmp_del_list, list);
1201ee4bcd3bSJian Shen 	hclgevf_sync_from_add_list(&tmp_add_list, list);
1202ee4bcd3bSJian Shen 
1203ee4bcd3bSJian Shen 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1204ee4bcd3bSJian Shen }
1205ee4bcd3bSJian Shen 
1206ee4bcd3bSJian Shen static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
1207ee4bcd3bSJian Shen {
1208ee4bcd3bSJian Shen 	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
1209ee4bcd3bSJian Shen 	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
1210ee4bcd3bSJian Shen }
1211ee4bcd3bSJian Shen 
1212ee4bcd3bSJian Shen static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
1213ee4bcd3bSJian Shen {
1214ee4bcd3bSJian Shen 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1215ee4bcd3bSJian Shen 
1216ee4bcd3bSJian Shen 	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
1217ee4bcd3bSJian Shen 	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
1218ee4bcd3bSJian Shen 
1219ee4bcd3bSJian Shen 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1220ee4bcd3bSJian Shen }
1221ee4bcd3bSJian Shen 
1222fa6a262aSJian Shen static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
1223fa6a262aSJian Shen {
1224fa6a262aSJian Shen 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1225fa6a262aSJian Shen 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
1226fa6a262aSJian Shen 	struct hclge_vf_to_pf_msg send_msg;
1227fa6a262aSJian Shen 
1228fa6a262aSJian Shen 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
1229fa6a262aSJian Shen 		return -EOPNOTSUPP;
1230fa6a262aSJian Shen 
1231fa6a262aSJian Shen 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
1232fa6a262aSJian Shen 			       HCLGE_MBX_ENABLE_VLAN_FILTER);
1233fa6a262aSJian Shen 	send_msg.data[0] = enable ? 1 : 0;
1234fa6a262aSJian Shen 
1235fa6a262aSJian Shen 	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1236fa6a262aSJian Shen }
1237fa6a262aSJian Shen 
1238e2cb1decSSalil Mehta static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
1239e2cb1decSSalil Mehta 				   __be16 proto, u16 vlan_id,
1240e2cb1decSSalil Mehta 				   bool is_kill)
1241e2cb1decSSalil Mehta {
1242e2cb1decSSalil Mehta 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1243416eedb6SJie Wang 	struct hclge_mbx_vlan_filter *vlan_filter;
1244d3410018SYufeng Mo 	struct hclge_vf_to_pf_msg send_msg;
1245fe4144d4SJian Shen 	int ret;
1246e2cb1decSSalil Mehta 
1247b37ce587SYufeng Mo 	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
1248e2cb1decSSalil Mehta 		return -EINVAL;
1249e2cb1decSSalil Mehta 
1250e2cb1decSSalil Mehta 	if (proto != htons(ETH_P_8021Q))
1251e2cb1decSSalil Mehta 		return -EPROTONOSUPPORT;
1252e2cb1decSSalil Mehta 
1253b7b5d25bSGuojia Liao 	/* When the device is resetting or the reset has failed, the firmware
1254b7b5d25bSGuojia Liao 	 * is unable to handle the mailbox. Just record the vlan id, and remove
1255fe4144d4SJian Shen 	 * it after the reset has finished.
1256fe4144d4SJian Shen 	 */
1257b7b5d25bSGuojia Liao 	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
1258b7b5d25bSGuojia Liao 	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
1259fe4144d4SJian Shen 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
1260fe4144d4SJian Shen 		return -EBUSY;
1261fe4144d4SJian Shen 	}
1262fe4144d4SJian Shen 
1263d3410018SYufeng Mo 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
1264d3410018SYufeng Mo 			       HCLGE_MBX_VLAN_FILTER);
1265416eedb6SJie Wang 	vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
1266416eedb6SJie Wang 	vlan_filter->is_kill = is_kill;
1267416eedb6SJie Wang 	vlan_filter->vlan_id = cpu_to_le16(vlan_id);
1268416eedb6SJie Wang 	vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));
1269416eedb6SJie Wang 
127046ee7350SGuojia Liao 	/* when removing the hw vlan filter failed, record the vlan id,
1271fe4144d4SJian Shen 	 * and try to remove it from hw later, to be consistent
1272fe4144d4SJian Shen 	 * with the stack.
1273fe4144d4SJian Shen 	 */
1274d3410018SYufeng Mo 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1275fe4144d4SJian Shen 	if (is_kill && ret)
1276fe4144d4SJian Shen 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
1277fe4144d4SJian Shen 
1278fe4144d4SJian Shen 	return ret;
1279fe4144d4SJian Shen }
1280fe4144d4SJian Shen 
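/* Retry deletion of VLAN IDs recorded in vlan_del_fail_bmap, at most
 * HCLGEVF_MAX_SYNC_COUNT entries per invocation.
 */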
1281fe4144d4SJian Shen static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
1282fe4144d4SJian Shen {
1283fe4144d4SJian Shen #define HCLGEVF_MAX_SYNC_COUNT	60
1284fe4144d4SJian Shen 	struct hnae3_handle *handle = &hdev->nic;
1285fe4144d4SJian Shen 	int ret, sync_cnt = 0;
1286fe4144d4SJian Shen 	u16 vlan_id;
1287fe4144d4SJian Shen 
1288fe4144d4SJian Shen 	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
1289fe4144d4SJian Shen 	while (vlan_id != VLAN_N_VID) {
1290fe4144d4SJian Shen 		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
1291fe4144d4SJian Shen 					      vlan_id, true);
1292fe4144d4SJian Shen 		if (ret)
1293fe4144d4SJian Shen 			return;
1294fe4144d4SJian Shen 
1295fe4144d4SJian Shen 		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
1296fe4144d4SJian Shen 		sync_cnt++;
1297fe4144d4SJian Shen 		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
1298fe4144d4SJian Shen 			return;
1299fe4144d4SJian Shen 
1300fe4144d4SJian Shen 		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
1301fe4144d4SJian Shen 	}
1302e2cb1decSSalil Mehta }
1303e2cb1decSSalil Mehta 
1304b2641e2aSYunsheng Lin static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
1305b2641e2aSYunsheng Lin {
1306b2641e2aSYunsheng Lin 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1307d3410018SYufeng Mo 	struct hclge_vf_to_pf_msg send_msg;
1308b2641e2aSYunsheng Lin 
1309d3410018SYufeng Mo 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
1310d3410018SYufeng Mo 			       HCLGE_MBX_VLAN_RX_OFF_CFG);
1311d3410018SYufeng Mo 	send_msg.data[0] = enable ? 1 : 0;
1312d3410018SYufeng Mo 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1313b2641e2aSYunsheng Lin }
1314b2641e2aSYunsheng Lin 
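/* Reset all TQPs through the PF mailbox: disable the VF queues first, then
 * request a reset of queue 0; if the PF does not report
 * HCLGEVF_RESET_ALL_QUEUE_DONE, reset each remaining queue individually.
 */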
13158fa86551SYufeng Mo static int hclgevf_reset_tqp(struct hnae3_handle *handle)
1316e2cb1decSSalil Mehta {
13178fa86551SYufeng Mo #define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
1318e2cb1decSSalil Mehta 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1319d3410018SYufeng Mo 	struct hclge_vf_to_pf_msg send_msg;
13208fa86551SYufeng Mo 	u8 return_status = 0;
13211a426f8bSPeng Li 	int ret;
13228fa86551SYufeng Mo 	u16 i;
1323e2cb1decSSalil Mehta 
13241a426f8bSPeng Li 	/* disable the vf queues before sending the queue reset msg to PF */
13258fa86551SYufeng Mo 	ret = hclgevf_tqp_enable(handle, false);
13268fa86551SYufeng Mo 	if (ret) {
13278fa86551SYufeng Mo 		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
13288fa86551SYufeng Mo 			ret);
13297fa6be4fSHuazhong Tan 		return ret;
13308fa86551SYufeng Mo 	}
13311a426f8bSPeng Li 
1332d3410018SYufeng Mo 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
13338fa86551SYufeng Mo 
13348fa86551SYufeng Mo 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
13358fa86551SYufeng Mo 				   sizeof(return_status));
13368fa86551SYufeng Mo 	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
13378fa86551SYufeng Mo 		return ret;
13388fa86551SYufeng Mo 
13398fa86551SYufeng Mo 	for (i = 1; i < handle->kinfo.num_tqps; i++) {
13408fa86551SYufeng Mo 		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
1341416eedb6SJie Wang 		*(__le16 *)send_msg.data = cpu_to_le16(i);
13428fa86551SYufeng Mo 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
13438fa86551SYufeng Mo 		if (ret)
13448fa86551SYufeng Mo 			return ret;
13458fa86551SYufeng Mo 	}
13468fa86551SYufeng Mo 
13478fa86551SYufeng Mo 	return 0;
1348e2cb1decSSalil Mehta }
1349e2cb1decSSalil Mehta 
1350818f1675SYunsheng Lin static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
1351818f1675SYunsheng Lin {
1352818f1675SYunsheng Lin 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1353416eedb6SJie Wang 	struct hclge_mbx_mtu_info *mtu_info;
1354d3410018SYufeng Mo 	struct hclge_vf_to_pf_msg send_msg;
1355818f1675SYunsheng Lin 
1356d3410018SYufeng Mo 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
1357416eedb6SJie Wang 	mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
1358416eedb6SJie Wang 	mtu_info->mtu = cpu_to_le32(new_mtu);
1359416eedb6SJie Wang 
1360d3410018SYufeng Mo 	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1361818f1675SYunsheng Lin }
1362818f1675SYunsheng Lin 
13636988eb2aSSalil Mehta static int hclgevf_notify_client(struct hclgevf_dev *hdev,
13646988eb2aSSalil Mehta 				 enum hnae3_reset_notify_type type)
13656988eb2aSSalil Mehta {
13666988eb2aSSalil Mehta 	struct hnae3_client *client = hdev->nic_client;
13676988eb2aSSalil Mehta 	struct hnae3_handle *handle = &hdev->nic;
13686a5f6fa3SHuazhong Tan 	int ret;
13696988eb2aSSalil Mehta 
137025d1817cSHuazhong Tan 	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
137125d1817cSHuazhong Tan 	    !client)
137225d1817cSHuazhong Tan 		return 0;
137325d1817cSHuazhong Tan 
13746988eb2aSSalil Mehta 	if (!client->ops->reset_notify)
13756988eb2aSSalil Mehta 		return -EOPNOTSUPP;
13766988eb2aSSalil Mehta 
13776a5f6fa3SHuazhong Tan 	ret = client->ops->reset_notify(handle, type);
13786a5f6fa3SHuazhong Tan 	if (ret)
13796a5f6fa3SHuazhong Tan 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
13806a5f6fa3SHuazhong Tan 			type, ret);
13816a5f6fa3SHuazhong Tan 
13826a5f6fa3SHuazhong Tan 	return ret;
13836988eb2aSSalil Mehta }
13846988eb2aSSalil Mehta 
1385fe735c84SHuazhong Tan static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
1386fe735c84SHuazhong Tan 				      enum hnae3_reset_notify_type type)
1387fe735c84SHuazhong Tan {
1388fe735c84SHuazhong Tan 	struct hnae3_client *client = hdev->roce_client;
1389fe735c84SHuazhong Tan 	struct hnae3_handle *handle = &hdev->roce;
1390fe735c84SHuazhong Tan 	int ret;
1391fe735c84SHuazhong Tan 
1392fe735c84SHuazhong Tan 	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
1393fe735c84SHuazhong Tan 		return 0;
1394fe735c84SHuazhong Tan 
1395fe735c84SHuazhong Tan 	if (!client->ops->reset_notify)
1396fe735c84SHuazhong Tan 		return -EOPNOTSUPP;
1397fe735c84SHuazhong Tan 
1398fe735c84SHuazhong Tan 	ret = client->ops->reset_notify(handle, type);
1399fe735c84SHuazhong Tan 	if (ret)
1400fe735c84SHuazhong Tan 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
1401fe735c84SHuazhong Tan 			type, ret);
1402fe735c84SHuazhong Tan 	return ret;
1403fe735c84SHuazhong Tan }
1404fe735c84SHuazhong Tan 
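/* Poll the reset status register (VF or function reset, depending on the
 * reset type) until hardware clears the reset-in-progress bits or the
 * timeout expires.
 */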
14056988eb2aSSalil Mehta static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
14066988eb2aSSalil Mehta {
1407aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_US	20000
1408aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_CNT	2000
1409aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
1410aa5c4f17SHuazhong Tan 	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
1411aa5c4f17SHuazhong Tan 
1412aa5c4f17SHuazhong Tan 	u32 val;
1413aa5c4f17SHuazhong Tan 	int ret;
14146988eb2aSSalil Mehta 
1415f28368bbSHuazhong Tan 	if (hdev->reset_type == HNAE3_VF_RESET)
1416076bb537SJie Wang 		ret = readl_poll_timeout(hdev->hw.hw.io_base +
141772e2fb07SHuazhong Tan 					 HCLGEVF_VF_RST_ING, val,
141872e2fb07SHuazhong Tan 					 !(val & HCLGEVF_VF_RST_ING_BIT),
141972e2fb07SHuazhong Tan 					 HCLGEVF_RESET_WAIT_US,
142072e2fb07SHuazhong Tan 					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
142172e2fb07SHuazhong Tan 	else
1422076bb537SJie Wang 		ret = readl_poll_timeout(hdev->hw.hw.io_base +
142372e2fb07SHuazhong Tan 					 HCLGEVF_RST_ING, val,
1424aa5c4f17SHuazhong Tan 					 !(val & HCLGEVF_RST_ING_BITS),
1425aa5c4f17SHuazhong Tan 					 HCLGEVF_RESET_WAIT_US,
1426aa5c4f17SHuazhong Tan 					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
14276988eb2aSSalil Mehta 
14286988eb2aSSalil Mehta 	/* hardware completion status should be available by this time */
1429aa5c4f17SHuazhong Tan 	if (ret) {
1430aa5c4f17SHuazhong Tan 		dev_err(&hdev->pdev->dev,
14318912fd6aSColin Ian King 			"couldn't get reset done status from h/w, timeout!\n");
1432aa5c4f17SHuazhong Tan 		return ret;
14336988eb2aSSalil Mehta 	}
14346988eb2aSSalil Mehta 
14356988eb2aSSalil Mehta 	/* wait a bit more to let the reset of the stack complete. This might
14366988eb2aSSalil Mehta 	 * happen in case the reset assertion was made by the PF. Yes, this
14376988eb2aSSalil Mehta 	 * also means we might end up waiting a bit more even for a VF reset.
14386988eb2aSSalil Mehta 	 */
14396988eb2aSSalil Mehta 	msleep(5000);
14406988eb2aSSalil Mehta 
14416988eb2aSSalil Mehta 	return 0;
14426988eb2aSSalil Mehta }
14436988eb2aSSalil Mehta 
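/* Set or clear the software-reset-ready bit in the CSQ depth register,
 * which is used as the reset handshake with the IMP firmware.
 */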
14446b428b4fSHuazhong Tan static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
14456b428b4fSHuazhong Tan {
14466b428b4fSHuazhong Tan 	u32 reg_val;
14476b428b4fSHuazhong Tan 
1448cb413bfaSJie Wang 	reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
14496b428b4fSHuazhong Tan 	if (enable)
14506b428b4fSHuazhong Tan 		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
14516b428b4fSHuazhong Tan 	else
14526b428b4fSHuazhong Tan 		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
14536b428b4fSHuazhong Tan 
1454cb413bfaSJie Wang 	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
14556b428b4fSHuazhong Tan 			  reg_val);
14566b428b4fSHuazhong Tan }
14576b428b4fSHuazhong Tan 
14586988eb2aSSalil Mehta static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
14596988eb2aSSalil Mehta {
14607a01c897SSalil Mehta 	int ret;
14617a01c897SSalil Mehta 
14626988eb2aSSalil Mehta 	/* uninitialize the nic client */
14636a5f6fa3SHuazhong Tan 	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
14646a5f6fa3SHuazhong Tan 	if (ret)
14656a5f6fa3SHuazhong Tan 		return ret;
14666988eb2aSSalil Mehta 
14677a01c897SSalil Mehta 	/* re-initialize the hclge device */
14689c6f7085SHuazhong Tan 	ret = hclgevf_reset_hdev(hdev);
14697a01c897SSalil Mehta 	if (ret) {
14707a01c897SSalil Mehta 		dev_err(&hdev->pdev->dev,
14717a01c897SSalil Mehta 			"hclge device re-init failed, VF is disabled!\n");
14727a01c897SSalil Mehta 		return ret;
14737a01c897SSalil Mehta 	}
14746988eb2aSSalil Mehta 
14756988eb2aSSalil Mehta 	/* bring up the nic client again */
14766a5f6fa3SHuazhong Tan 	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
14776a5f6fa3SHuazhong Tan 	if (ret)
14786a5f6fa3SHuazhong Tan 		return ret;
14796988eb2aSSalil Mehta 
14806b428b4fSHuazhong Tan 	/* clear handshake status with IMP */
14816b428b4fSHuazhong Tan 	hclgevf_reset_handshake(hdev, false);
14826b428b4fSHuazhong Tan 
14831cc9bc6eSHuazhong Tan 	/* bring up the nic to enable TX/RX again */
14841cc9bc6eSHuazhong Tan 	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
14856988eb2aSSalil Mehta }
14866988eb2aSSalil Mehta 
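/* For a VF function reset, ask the PF to assert the reset via mailbox; then
 * disable the command queue, wait HCLGEVF_RESET_SYNC_TIME ms and set the
 * handshake bit to signal that the preparatory work is done.
 */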
1487dea846e8SHuazhong Tan static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
1488dea846e8SHuazhong Tan {
1489ada13ee3SHuazhong Tan #define HCLGEVF_RESET_SYNC_TIME 100
1490ada13ee3SHuazhong Tan 
1491f28368bbSHuazhong Tan 	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
1492d41884eeSHuazhong Tan 		struct hclge_vf_to_pf_msg send_msg;
1493d41884eeSHuazhong Tan 		int ret;
1494d41884eeSHuazhong Tan 
1495d3410018SYufeng Mo 		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
1496d3410018SYufeng Mo 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1497cddd5648SHuazhong Tan 		if (ret) {
1498cddd5648SHuazhong Tan 			dev_err(&hdev->pdev->dev,
1499cddd5648SHuazhong Tan 				"failed to assert VF reset, ret = %d\n", ret);
1500cddd5648SHuazhong Tan 			return ret;
1501cddd5648SHuazhong Tan 		}
1502c88a6e7dSHuazhong Tan 		hdev->rst_stats.vf_func_rst_cnt++;
1503dea846e8SHuazhong Tan 	}
1504dea846e8SHuazhong Tan 
1505076bb537SJie Wang 	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
1506ada13ee3SHuazhong Tan 	/* inform hardware that preparatory work is done */
1507ada13ee3SHuazhong Tan 	msleep(HCLGEVF_RESET_SYNC_TIME);
15086b428b4fSHuazhong Tan 	hclgevf_reset_handshake(hdev, true);
1509d41884eeSHuazhong Tan 	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
1510d41884eeSHuazhong Tan 		 hdev->reset_type);
1511dea846e8SHuazhong Tan 
1512d41884eeSHuazhong Tan 	return 0;
1513dea846e8SHuazhong Tan }
1514dea846e8SHuazhong Tan 
15153d77d0cbSHuazhong Tan static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
15163d77d0cbSHuazhong Tan {
15173d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
15183d77d0cbSHuazhong Tan 		 hdev->rst_stats.vf_func_rst_cnt);
15193d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
15203d77d0cbSHuazhong Tan 		 hdev->rst_stats.flr_rst_cnt);
15213d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
15223d77d0cbSHuazhong Tan 		 hdev->rst_stats.vf_rst_cnt);
15233d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
15243d77d0cbSHuazhong Tan 		 hdev->rst_stats.rst_done_cnt);
15253d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
15263d77d0cbSHuazhong Tan 		 hdev->rst_stats.hw_rst_done_cnt);
15273d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "reset count: %u\n",
15283d77d0cbSHuazhong Tan 		 hdev->rst_stats.rst_cnt);
15293d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
15303d77d0cbSHuazhong Tan 		 hdev->rst_stats.rst_fail_cnt);
15313d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
15323d77d0cbSHuazhong Tan 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
15333d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
1534cb413bfaSJie Wang 		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
15353d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
1536cb413bfaSJie Wang 		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
15373d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
15383d77d0cbSHuazhong Tan 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
15393d77d0cbSHuazhong Tan 	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
15403d77d0cbSHuazhong Tan }
15413d77d0cbSHuazhong Tan 
1542bbe6540eSHuazhong Tan static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
1543bbe6540eSHuazhong Tan {
15446b428b4fSHuazhong Tan 	/* recover handshake status with IMP when reset fails */
15456b428b4fSHuazhong Tan 	hclgevf_reset_handshake(hdev, true);
1546bbe6540eSHuazhong Tan 	hdev->rst_stats.rst_fail_cnt++;
1547adcf738bSGuojia Liao 	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
1548bbe6540eSHuazhong Tan 		hdev->rst_stats.rst_fail_cnt);
1549bbe6540eSHuazhong Tan 
1550bbe6540eSHuazhong Tan 	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
1551bbe6540eSHuazhong Tan 		set_bit(hdev->reset_type, &hdev->reset_pending);
1552bbe6540eSHuazhong Tan 
1553bbe6540eSHuazhong Tan 	if (hclgevf_is_reset_pending(hdev)) {
1554bbe6540eSHuazhong Tan 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1555bbe6540eSHuazhong Tan 		hclgevf_reset_task_schedule(hdev);
15563d77d0cbSHuazhong Tan 	} else {
1557d5432455SGuojia Liao 		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
15583d77d0cbSHuazhong Tan 		hclgevf_dump_rst_info(hdev);
1559bbe6540eSHuazhong Tan 	}
1560bbe6540eSHuazhong Tan }
1561bbe6540eSHuazhong Tan 
15621cc9bc6eSHuazhong Tan static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
15636988eb2aSSalil Mehta {
15646988eb2aSSalil Mehta 	int ret;
15656988eb2aSSalil Mehta 
1566c88a6e7dSHuazhong Tan 	hdev->rst_stats.rst_cnt++;
15676988eb2aSSalil Mehta 
1568fe735c84SHuazhong Tan 	/* perform reset of the stack & ae device for a client */
1569fe735c84SHuazhong Tan 	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
1570fe735c84SHuazhong Tan 	if (ret)
1571fe735c84SHuazhong Tan 		return ret;
1572fe735c84SHuazhong Tan 
15731cc9bc6eSHuazhong Tan 	rtnl_lock();
15746988eb2aSSalil Mehta 	/* bring down the nic to stop any ongoing TX/RX */
15756a5f6fa3SHuazhong Tan 	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
157629118ab9SHuazhong Tan 	rtnl_unlock();
15776a5f6fa3SHuazhong Tan 	if (ret)
15781cc9bc6eSHuazhong Tan 		return ret;
1579dea846e8SHuazhong Tan 
15801cc9bc6eSHuazhong Tan 	return hclgevf_reset_prepare_wait(hdev);
15816988eb2aSSalil Mehta }
15826988eb2aSSalil Mehta 
15831cc9bc6eSHuazhong Tan static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
15841cc9bc6eSHuazhong Tan {
15851cc9bc6eSHuazhong Tan 	int ret;
15861cc9bc6eSHuazhong Tan 
1587c88a6e7dSHuazhong Tan 	hdev->rst_stats.hw_rst_done_cnt++;
1588fe735c84SHuazhong Tan 	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
1589fe735c84SHuazhong Tan 	if (ret)
1590fe735c84SHuazhong Tan 		return ret;
1591c88a6e7dSHuazhong Tan 
159229118ab9SHuazhong Tan 	rtnl_lock();
15936988eb2aSSalil Mehta 	/* now, re-initialize the nic client and ae device */
15946988eb2aSSalil Mehta 	ret = hclgevf_reset_stack(hdev);
15951cc9bc6eSHuazhong Tan 	rtnl_unlock();
15966a5f6fa3SHuazhong Tan 	if (ret) {
15976988eb2aSSalil Mehta 		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
15981cc9bc6eSHuazhong Tan 		return ret;
15996a5f6fa3SHuazhong Tan 	}
16006988eb2aSSalil Mehta 
1601fe735c84SHuazhong Tan 	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
1602fe735c84SHuazhong Tan 	/* ignore the RoCE notify error only if the reset has already failed
1603fe735c84SHuazhong Tan 	 * HCLGEVF_RESET_MAX_FAIL_CNT - 1 times
1604fe735c84SHuazhong Tan 	 */
1605fe735c84SHuazhong Tan 	if (ret &&
1606fe735c84SHuazhong Tan 	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
1607fe735c84SHuazhong Tan 		return ret;
1608fe735c84SHuazhong Tan 
1609fe735c84SHuazhong Tan 	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
1610fe735c84SHuazhong Tan 	if (ret)
1611fe735c84SHuazhong Tan 		return ret;
1612fe735c84SHuazhong Tan 
1613b644a8d4SHuazhong Tan 	hdev->last_reset_time = jiffies;
1614c88a6e7dSHuazhong Tan 	hdev->rst_stats.rst_done_cnt++;
1615bbe6540eSHuazhong Tan 	hdev->rst_stats.rst_fail_cnt = 0;
1616d5432455SGuojia Liao 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1617b644a8d4SHuazhong Tan 
16181cc9bc6eSHuazhong Tan 	return 0;
16191cc9bc6eSHuazhong Tan }
16201cc9bc6eSHuazhong Tan 
16211cc9bc6eSHuazhong Tan static void hclgevf_reset(struct hclgevf_dev *hdev)
16221cc9bc6eSHuazhong Tan {
16231cc9bc6eSHuazhong Tan 	if (hclgevf_reset_prepare(hdev))
16241cc9bc6eSHuazhong Tan 		goto err_reset;
16251cc9bc6eSHuazhong Tan 
16261cc9bc6eSHuazhong Tan 	/* check if VF could successfully fetch the hardware reset completion
16271cc9bc6eSHuazhong Tan 	 * status from the hardware
16281cc9bc6eSHuazhong Tan 	 */
16291cc9bc6eSHuazhong Tan 	if (hclgevf_reset_wait(hdev)) {
16301cc9bc6eSHuazhong Tan 		/* can't do much in this situation, will disable VF */
16311cc9bc6eSHuazhong Tan 		dev_err(&hdev->pdev->dev,
16321cc9bc6eSHuazhong Tan 			"failed to fetch H/W reset completion status\n");
16331cc9bc6eSHuazhong Tan 		goto err_reset;
16341cc9bc6eSHuazhong Tan 	}
16351cc9bc6eSHuazhong Tan 
16361cc9bc6eSHuazhong Tan 	if (hclgevf_reset_rebuild(hdev))
16371cc9bc6eSHuazhong Tan 		goto err_reset;
16381cc9bc6eSHuazhong Tan 
16391cc9bc6eSHuazhong Tan 	return;
16401cc9bc6eSHuazhong Tan 
16416a5f6fa3SHuazhong Tan err_reset:
1642bbe6540eSHuazhong Tan 	hclgevf_reset_err_handle(hdev);
16436988eb2aSSalil Mehta }
16446988eb2aSSalil Mehta 
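/* Return the highest priority reset level pending in @addr and clear it,
 * together with any lower level it covers, from the bitmap.
 */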
1645720bd583SHuazhong Tan static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
1646720bd583SHuazhong Tan 						     unsigned long *addr)
1647720bd583SHuazhong Tan {
1648720bd583SHuazhong Tan 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
1649720bd583SHuazhong Tan 
1650dea846e8SHuazhong Tan 	/* return the highest priority reset level amongst all */
1651b90fcc5bSHuazhong Tan 	if (test_bit(HNAE3_VF_RESET, addr)) {
1652b90fcc5bSHuazhong Tan 		rst_level = HNAE3_VF_RESET;
1653b90fcc5bSHuazhong Tan 		clear_bit(HNAE3_VF_RESET, addr);
1654b90fcc5bSHuazhong Tan 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1655b90fcc5bSHuazhong Tan 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1656b90fcc5bSHuazhong Tan 	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
1657dea846e8SHuazhong Tan 		rst_level = HNAE3_VF_FULL_RESET;
1658dea846e8SHuazhong Tan 		clear_bit(HNAE3_VF_FULL_RESET, addr);
1659dea846e8SHuazhong Tan 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1660aa5c4f17SHuazhong Tan 	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
1661aa5c4f17SHuazhong Tan 		rst_level = HNAE3_VF_PF_FUNC_RESET;
1662aa5c4f17SHuazhong Tan 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1663aa5c4f17SHuazhong Tan 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1664dea846e8SHuazhong Tan 	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
1665dea846e8SHuazhong Tan 		rst_level = HNAE3_VF_FUNC_RESET;
1666dea846e8SHuazhong Tan 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
16676ff3cf07SHuazhong Tan 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
16686ff3cf07SHuazhong Tan 		rst_level = HNAE3_FLR_RESET;
16696ff3cf07SHuazhong Tan 		clear_bit(HNAE3_FLR_RESET, addr);
1670720bd583SHuazhong Tan 	}
1671720bd583SHuazhong Tan 
1672720bd583SHuazhong Tan 	return rst_level;
1673720bd583SHuazhong Tan }
1674720bd583SHuazhong Tan 
16756ae4e733SShiju Jose static void hclgevf_reset_event(struct pci_dev *pdev,
16766ae4e733SShiju Jose 				struct hnae3_handle *handle)
16776d4c3981SSalil Mehta {
16786ff3cf07SHuazhong Tan 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
16796ff3cf07SHuazhong Tan 	struct hclgevf_dev *hdev = ae_dev->priv;
16806d4c3981SSalil Mehta 
16816d4c3981SSalil Mehta 	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
16826d4c3981SSalil Mehta 
16836ff3cf07SHuazhong Tan 	if (hdev->default_reset_request)
16840742ed7cSHuazhong Tan 		hdev->reset_level =
1685720bd583SHuazhong Tan 			hclgevf_get_reset_level(hdev,
1686720bd583SHuazhong Tan 						&hdev->default_reset_request);
1687720bd583SHuazhong Tan 	else
1688dea846e8SHuazhong Tan 		hdev->reset_level = HNAE3_VF_FUNC_RESET;
16896d4c3981SSalil Mehta 
1690436667d2SSalil Mehta 	/* reset of this VF requested */
1691436667d2SSalil Mehta 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
1692436667d2SSalil Mehta 	hclgevf_reset_task_schedule(hdev);
16936d4c3981SSalil Mehta 
16940742ed7cSHuazhong Tan 	hdev->last_reset_time = jiffies;
16956d4c3981SSalil Mehta }
16966d4c3981SSalil Mehta 
1697720bd583SHuazhong Tan static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
1698720bd583SHuazhong Tan 					  enum hnae3_reset_type rst_type)
1699720bd583SHuazhong Tan {
1700720bd583SHuazhong Tan 	struct hclgevf_dev *hdev = ae_dev->priv;
1701720bd583SHuazhong Tan 
1702720bd583SHuazhong Tan 	set_bit(rst_type, &hdev->default_reset_request);
1703720bd583SHuazhong Tan }
1704720bd583SHuazhong Tan 
1705f28368bbSHuazhong Tan static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1706f28368bbSHuazhong Tan {
1707f28368bbSHuazhong Tan 	writel(en ? 1 : 0, vector->addr);
1708f28368bbSHuazhong Tan }
1709f28368bbSHuazhong Tan 
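/* Prepare for a reset requested from outside the VF reset task (e.g. FLR):
 * retry the prepare step up to HCLGEVF_RESET_RETRY_CNT times, then keep the
 * misc vector disabled until hclgevf_reset_done() re-enables it.
 */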
1710bb1890d5SJiaran Zhang static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
1711bb1890d5SJiaran Zhang 					  enum hnae3_reset_type rst_type)
17126ff3cf07SHuazhong Tan {
1713bb1890d5SJiaran Zhang #define HCLGEVF_RESET_RETRY_WAIT_MS	500
1714bb1890d5SJiaran Zhang #define HCLGEVF_RESET_RETRY_CNT		5
1715f28368bbSHuazhong Tan 
17166ff3cf07SHuazhong Tan 	struct hclgevf_dev *hdev = ae_dev->priv;
1717f28368bbSHuazhong Tan 	int retry_cnt = 0;
1718f28368bbSHuazhong Tan 	int ret;
17196ff3cf07SHuazhong Tan 
1720ed0e658cSJiaran Zhang 	while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
1721f28368bbSHuazhong Tan 		down(&hdev->reset_sem);
1722f28368bbSHuazhong Tan 		set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1723bb1890d5SJiaran Zhang 		hdev->reset_type = rst_type;
1724f28368bbSHuazhong Tan 		ret = hclgevf_reset_prepare(hdev);
1725ed0e658cSJiaran Zhang 		if (!ret && !hdev->reset_pending)
1726ed0e658cSJiaran Zhang 			break;
1727ed0e658cSJiaran Zhang 
17286ff3cf07SHuazhong Tan 		dev_err(&hdev->pdev->dev,
1729ed0e658cSJiaran Zhang 			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
1730ed0e658cSJiaran Zhang 			ret, hdev->reset_pending, retry_cnt);
1731f28368bbSHuazhong Tan 		clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1732f28368bbSHuazhong Tan 		up(&hdev->reset_sem);
1733bb1890d5SJiaran Zhang 		msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
1734f28368bbSHuazhong Tan 	}
1735f28368bbSHuazhong Tan 
1736bb1890d5SJiaran Zhang 	/* disable the misc vector until the reset is done */
1737f28368bbSHuazhong Tan 	hclgevf_enable_vector(&hdev->misc_vector, false);
1738bb1890d5SJiaran Zhang 
1739bb1890d5SJiaran Zhang 	if (hdev->reset_type == HNAE3_FLR_RESET)
1740f28368bbSHuazhong Tan 		hdev->rst_stats.flr_rst_cnt++;
1741f28368bbSHuazhong Tan }
1742f28368bbSHuazhong Tan 
1743bb1890d5SJiaran Zhang static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
1744f28368bbSHuazhong Tan {
1745f28368bbSHuazhong Tan 	struct hclgevf_dev *hdev = ae_dev->priv;
1746f28368bbSHuazhong Tan 	int ret;
1747f28368bbSHuazhong Tan 
1748f28368bbSHuazhong Tan 	hclgevf_enable_vector(&hdev->misc_vector, true);
1749f28368bbSHuazhong Tan 
1750f28368bbSHuazhong Tan 	ret = hclgevf_reset_rebuild(hdev);
1751f28368bbSHuazhong Tan 	if (ret)
1752f28368bbSHuazhong Tan 		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
1753f28368bbSHuazhong Tan 			 ret);
1754f28368bbSHuazhong Tan 
1755f28368bbSHuazhong Tan 	hdev->reset_type = HNAE3_NONE_RESET;
1756f28368bbSHuazhong Tan 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1757f28368bbSHuazhong Tan 	up(&hdev->reset_sem);
17586ff3cf07SHuazhong Tan }
17596ff3cf07SHuazhong Tan 
1760e2cb1decSSalil Mehta static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
1761e2cb1decSSalil Mehta {
1762e2cb1decSSalil Mehta 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1763e2cb1decSSalil Mehta 
1764e2cb1decSSalil Mehta 	return hdev->fw_version;
1765e2cb1decSSalil Mehta }
1766e2cb1decSSalil Mehta 
1767e2cb1decSSalil Mehta static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
1768e2cb1decSSalil Mehta {
1769e2cb1decSSalil Mehta 	struct hclgevf_misc_vector *vector = &hdev->misc_vector;
1770e2cb1decSSalil Mehta 
1771e2cb1decSSalil Mehta 	vector->vector_irq = pci_irq_vector(hdev->pdev,
1772e2cb1decSSalil Mehta 					    HCLGEVF_MISC_VECTOR_NUM);
1773076bb537SJie Wang 	vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
1774e2cb1decSSalil Mehta 	/* vector status always valid for Vector 0 */
1775e2cb1decSSalil Mehta 	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
1776e2cb1decSSalil Mehta 	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
1777e2cb1decSSalil Mehta 
1778e2cb1decSSalil Mehta 	hdev->num_msi_left -= 1;
1779e2cb1decSSalil Mehta 	hdev->num_msi_used += 1;
1780e2cb1decSSalil Mehta }
1781e2cb1decSSalil Mehta 
178235a1e503SSalil Mehta void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
178335a1e503SSalil Mehta {
1784ff200099SYunsheng Lin 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
17850251d196SGuangbin Huang 	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
1786ff200099SYunsheng Lin 	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
1787ff200099SYunsheng Lin 			      &hdev->state))
17880ea68902SYunsheng Lin 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
178935a1e503SSalil Mehta }
179035a1e503SSalil Mehta 
179107a0556aSSalil Mehta void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
1792e2cb1decSSalil Mehta {
1793ff200099SYunsheng Lin 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1794ff200099SYunsheng Lin 	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
1795ff200099SYunsheng Lin 			      &hdev->state))
17960ea68902SYunsheng Lin 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
179707a0556aSSalil Mehta }
1798e2cb1decSSalil Mehta 
1799ff200099SYunsheng Lin static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
1800ff200099SYunsheng Lin 				  unsigned long delay)
1801e2cb1decSSalil Mehta {
1802d5432455SGuojia Liao 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1803d5432455SGuojia Liao 	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
18040ea68902SYunsheng Lin 		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
1805e2cb1decSSalil Mehta }
1806e2cb1decSSalil Mehta 
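/* Reset service task: handle a reset pending from hardware/PF, or escalate
 * a requested reset; after more than HCLGEVF_MAX_RESET_ATTEMPTS_CNT
 * attempts a full VF reset is scheduled instead.
 */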
1807ff200099SYunsheng Lin static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
180835a1e503SSalil Mehta {
1809d6ad7c53SGuojia Liao #define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3
1810d6ad7c53SGuojia Liao 
1811ff200099SYunsheng Lin 	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
1812ff200099SYunsheng Lin 		return;
1813ff200099SYunsheng Lin 
1814f28368bbSHuazhong Tan 	down(&hdev->reset_sem);
1815f28368bbSHuazhong Tan 	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
181635a1e503SSalil Mehta 
1817436667d2SSalil Mehta 	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
1818436667d2SSalil Mehta 			       &hdev->reset_state)) {
1819cd7e963dSSalil Mehta 		/* PF has intimated that it is about to reset the hardware.
18209b2f3477SWeihang Li 		 * We now have to poll & check if hardware has actually
18219b2f3477SWeihang Li 		 * completed the reset sequence. On hardware reset completion,
18229b2f3477SWeihang Li 		 * VF needs to reset the client and ae device.
182335a1e503SSalil Mehta 		 */
1824436667d2SSalil Mehta 		hdev->reset_attempts = 0;
1825436667d2SSalil Mehta 
1826dea846e8SHuazhong Tan 		hdev->last_reset_time = jiffies;
18271385cc81SYufeng Mo 		hdev->reset_type =
18281385cc81SYufeng Mo 			hclgevf_get_reset_level(hdev, &hdev->reset_pending);
18291385cc81SYufeng Mo 		if (hdev->reset_type != HNAE3_NONE_RESET)
18301cc9bc6eSHuazhong Tan 			hclgevf_reset(hdev);
1831436667d2SSalil Mehta 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
1832436667d2SSalil Mehta 				      &hdev->reset_state)) {
1833436667d2SSalil Mehta 		/* we could be here when either of below happens:
18349b2f3477SWeihang Li 		 * 1. reset was initiated due to watchdog timeout caused by
1835436667d2SSalil Mehta 		 *    a. IMP was earlier reset and our TX got choked down,
1836436667d2SSalil Mehta 		 *       which resulted in the watchdog reacting and inducing a
1837436667d2SSalil Mehta 		 *       VF reset. This also means our cmdq would be unreliable.
1838436667d2SSalil Mehta 		 *    b. problem in TX due to some other lower layer (e.g. the
1839436667d2SSalil Mehta 		 *       link layer not functioning properly etc.)
1840436667d2SSalil Mehta 		 * 2. VF reset might have been initiated due to some config
1841436667d2SSalil Mehta 		 *    change.
1842436667d2SSalil Mehta 		 *
1843436667d2SSalil Mehta 		 * NOTE: There's no clearer way to detect the above cases than to
1844436667d2SSalil Mehta 		 * react to the PF's response to this reset request. PF will ack
1845436667d2SSalil Mehta 		 * the 1b and 2. cases but we will not get any intimation about 1a
1846436667d2SSalil Mehta 		 * from PF as the cmdq would be in an unreliable state, i.e. the
1847436667d2SSalil Mehta 		 * mailbox communication between PF and VF would be broken.
184846ee7350SGuojia Liao 		 *
184946ee7350SGuojia Liao 		 * if we never get into the pending state it means either:
1850436667d2SSalil Mehta 		 * 1. PF is not receiving our request which could be due to IMP
1851436667d2SSalil Mehta 		 *    reset
1852436667d2SSalil Mehta 		 * 2. PF is screwed
1853436667d2SSalil Mehta 		 * We cannot do much for 2. but to check, first we can try
1854436667d2SSalil Mehta 		 *    resetting our PCIe + stack and see if it alleviates the problem.
1855436667d2SSalil Mehta 		 */
1856d6ad7c53SGuojia Liao 		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
1857436667d2SSalil Mehta 			/* prepare for full reset of stack + pcie interface */
1858dea846e8SHuazhong Tan 			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
1859436667d2SSalil Mehta 
1860436667d2SSalil Mehta 			/* "defer" schedule the reset task again */
1861436667d2SSalil Mehta 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1862436667d2SSalil Mehta 		} else {
1863436667d2SSalil Mehta 			hdev->reset_attempts++;
1864436667d2SSalil Mehta 
1865dea846e8SHuazhong Tan 			set_bit(hdev->reset_level, &hdev->reset_pending);
1866dea846e8SHuazhong Tan 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1867436667d2SSalil Mehta 		}
1868dea846e8SHuazhong Tan 		hclgevf_reset_task_schedule(hdev);
1869436667d2SSalil Mehta 	}
187035a1e503SSalil Mehta 
1871afb6afdbSHuazhong Tan 	hdev->reset_type = HNAE3_NONE_RESET;
187235a1e503SSalil Mehta 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1873f28368bbSHuazhong Tan 	up(&hdev->reset_sem);
187435a1e503SSalil Mehta }
187535a1e503SSalil Mehta 
1876ff200099SYunsheng Lin static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
1877e2cb1decSSalil Mehta {
1878ff200099SYunsheng Lin 	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
1879ff200099SYunsheng Lin 		return;
1880e2cb1decSSalil Mehta 
1881e2cb1decSSalil Mehta 	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
1882e2cb1decSSalil Mehta 		return;
1883e2cb1decSSalil Mehta 
188407a0556aSSalil Mehta 	hclgevf_mbx_async_handler(hdev);
1885e2cb1decSSalil Mehta 
1886e2cb1decSSalil Mehta 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1887e2cb1decSSalil Mehta }
1888e2cb1decSSalil Mehta 
1889ff200099SYunsheng Lin static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
1890a6d818e3SYunsheng Lin {
1891d3410018SYufeng Mo 	struct hclge_vf_to_pf_msg send_msg;
1892a6d818e3SYunsheng Lin 	int ret;
1893a6d818e3SYunsheng Lin 
1894076bb537SJie Wang 	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
1895c59a85c0SJian Shen 		return;
1896c59a85c0SJian Shen 
1897d3410018SYufeng Mo 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
1898d3410018SYufeng Mo 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1899a6d818e3SYunsheng Lin 	if (ret)
1900a6d818e3SYunsheng Lin 		dev_err(&hdev->pdev->dev,
1901a6d818e3SYunsheng Lin 			"VF sends keep alive cmd failed(=%d)\n", ret);
1902a6d818e3SYunsheng Lin }
1903a6d818e3SYunsheng Lin 
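/* Periodic service task, rate limited to roughly once per second: send the
 * keep-alive message, update TQP statistics, request the link status and
 * update the link mode, sync the VLAN, MAC and promiscuous mode settings,
 * then reschedule itself.
 */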
1904ff200099SYunsheng Lin static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
1905e2cb1decSSalil Mehta {
1906ff200099SYunsheng Lin 	unsigned long delta = round_jiffies_relative(HZ);
1907ff200099SYunsheng Lin 	struct hnae3_handle *handle = &hdev->nic;
1908e2cb1decSSalil Mehta 
1909e6394363SGuangbin Huang 	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
1910e6394363SGuangbin Huang 		return;
1911e6394363SGuangbin Huang 
1912ff200099SYunsheng Lin 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
1913ff200099SYunsheng Lin 		delta = jiffies - hdev->last_serv_processed;
1914db01afebSliuzhongzhu 
1915ff200099SYunsheng Lin 		if (delta < round_jiffies_relative(HZ)) {
1916ff200099SYunsheng Lin 			delta = round_jiffies_relative(HZ) - delta;
1917ff200099SYunsheng Lin 			goto out;
1918db01afebSliuzhongzhu 		}
1919ff200099SYunsheng Lin 	}
1920ff200099SYunsheng Lin 
1921ff200099SYunsheng Lin 	hdev->serv_processed_cnt++;
1922ff200099SYunsheng Lin 	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
1923ff200099SYunsheng Lin 		hclgevf_keep_alive(hdev);
1924ff200099SYunsheng Lin 
1925ff200099SYunsheng Lin 	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
1926ff200099SYunsheng Lin 		hdev->last_serv_processed = jiffies;
1927ff200099SYunsheng Lin 		goto out;
1928ff200099SYunsheng Lin 	}
1929ff200099SYunsheng Lin 
1930ff200099SYunsheng Lin 	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
19314afc310cSJie Wang 		hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
1932e2cb1decSSalil Mehta 
193301305e16SGuangbin Huang 	/* VF does not need to request link status when this bit is set, because
193401305e16SGuangbin Huang 	 * PF will push its link status to VFs when the link status changes.
1935e2cb1decSSalil Mehta 	 */
193601305e16SGuangbin Huang 	if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
1937e2cb1decSSalil Mehta 		hclgevf_request_link_info(hdev);
1938e2cb1decSSalil Mehta 
19399194d18bSliuzhongzhu 	hclgevf_update_link_mode(hdev);
19409194d18bSliuzhongzhu 
1941fe4144d4SJian Shen 	hclgevf_sync_vlan_filter(hdev);
1942fe4144d4SJian Shen 
1943ee4bcd3bSJian Shen 	hclgevf_sync_mac_table(hdev);
1944ee4bcd3bSJian Shen 
1945c631c696SJian Shen 	hclgevf_sync_promisc_mode(hdev);
1946c631c696SJian Shen 
1947ff200099SYunsheng Lin 	hdev->last_serv_processed = jiffies;
1948436667d2SSalil Mehta 
1949ff200099SYunsheng Lin out:
1950ff200099SYunsheng Lin 	hclgevf_task_schedule(hdev, delta);
1951ff200099SYunsheng Lin }
1952b3c3fe8eSYunsheng Lin 
1953ff200099SYunsheng Lin static void hclgevf_service_task(struct work_struct *work)
1954ff200099SYunsheng Lin {
1955ff200099SYunsheng Lin 	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
1956ff200099SYunsheng Lin 						service_task.work);
1957ff200099SYunsheng Lin 
1958ff200099SYunsheng Lin 	hclgevf_reset_service_task(hdev);
1959ff200099SYunsheng Lin 	hclgevf_mailbox_service_task(hdev);
1960ff200099SYunsheng Lin 	hclgevf_periodic_service_task(hdev);
1961ff200099SYunsheng Lin 
1962ff200099SYunsheng Lin 	/* Handle reset and mbx again in case the periodic task delays the
1963ff200099SYunsheng Lin 	 * handling by calling hclgevf_task_schedule() in
1964ff200099SYunsheng Lin 	 * hclgevf_periodic_service_task()
1965ff200099SYunsheng Lin 	 */
1966ff200099SYunsheng Lin 	hclgevf_reset_service_task(hdev);
1967ff200099SYunsheng Lin 	hclgevf_mailbox_service_task(hdev);
1968e2cb1decSSalil Mehta }
1969e2cb1decSSalil Mehta 
1970e2cb1decSSalil Mehta static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
1971e2cb1decSSalil Mehta {
1972cb413bfaSJie Wang 	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
1973e2cb1decSSalil Mehta }
1974e2cb1decSSalil Mehta 
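/* Decode the vector0 interrupt source (reset or mailbox) from the CMDQ
 * state register and return the value needed to clear it via @clearval.
 */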
1975b90fcc5bSHuazhong Tan static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
1976b90fcc5bSHuazhong Tan 						      u32 *clearval)
1977e2cb1decSSalil Mehta {
197813050921SHuazhong Tan 	u32 val, cmdq_stat_reg, rst_ing_reg;
1979e2cb1decSSalil Mehta 
1980e2cb1decSSalil Mehta 	/* fetch the events from their corresponding regs */
198113050921SHuazhong Tan 	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
1982cb413bfaSJie Wang 					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
198313050921SHuazhong Tan 	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
1984b90fcc5bSHuazhong Tan 		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
1985b90fcc5bSHuazhong Tan 		dev_info(&hdev->pdev->dev,
1986b90fcc5bSHuazhong Tan 			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
1987b90fcc5bSHuazhong Tan 		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
1988b90fcc5bSHuazhong Tan 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1989076bb537SJie Wang 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
199013050921SHuazhong Tan 		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
1991c88a6e7dSHuazhong Tan 		hdev->rst_stats.vf_rst_cnt++;
199272e2fb07SHuazhong Tan 		/* set up the VF hardware reset status; the PF will clear
199372e2fb07SHuazhong Tan 		 * this status once it has finished initializing.
199472e2fb07SHuazhong Tan 		 */
199572e2fb07SHuazhong Tan 		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
199672e2fb07SHuazhong Tan 		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
199772e2fb07SHuazhong Tan 				  val | HCLGEVF_VF_RST_ING_BIT);
1998b90fcc5bSHuazhong Tan 		return HCLGEVF_VECTOR0_EVENT_RST;
1999b90fcc5bSHuazhong Tan 	}
2000b90fcc5bSHuazhong Tan 
2001e2cb1decSSalil Mehta 	/* check for vector0 mailbox(=CMDQ RX) event source */
200213050921SHuazhong Tan 	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
200313050921SHuazhong Tan 		/* for revision 0x21, an interrupt is cleared by writing 0 to
200413050921SHuazhong Tan 		 * its bit in the clear register, while writing 1 keeps the
200513050921SHuazhong Tan 		 * old value.
200613050921SHuazhong Tan 		 * for revision 0x20, the clear register is a read & write
200713050921SHuazhong Tan 		 * register, so we should just write 0 to the bit we are
200813050921SHuazhong Tan 		 * handling, and keep the other bits as in cmdq_stat_reg.
200913050921SHuazhong Tan 		 */
2010295ba232SGuangbin Huang 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
201113050921SHuazhong Tan 			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
201213050921SHuazhong Tan 		else
201313050921SHuazhong Tan 			*clearval = cmdq_stat_reg &
201413050921SHuazhong Tan 				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
201513050921SHuazhong Tan 
2016b90fcc5bSHuazhong Tan 		return HCLGEVF_VECTOR0_EVENT_MBX;
2017e2cb1decSSalil Mehta 	}
2018e2cb1decSSalil Mehta 
2019e45afb39SHuazhong Tan 	/* print other vector0 event source */
2020e45afb39SHuazhong Tan 	dev_info(&hdev->pdev->dev,
2021e45afb39SHuazhong Tan 		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
2022e45afb39SHuazhong Tan 		 cmdq_stat_reg);
2023e2cb1decSSalil Mehta 
2024b90fcc5bSHuazhong Tan 	return HCLGEVF_VECTOR0_EVENT_OTHER;
2025e2cb1decSSalil Mehta }
2026e2cb1decSSalil Mehta 
2027e2cb1decSSalil Mehta static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
2028e2cb1decSSalil Mehta {
2029b90fcc5bSHuazhong Tan 	enum hclgevf_evt_cause event_cause;
2030e2cb1decSSalil Mehta 	struct hclgevf_dev *hdev = data;
2031e2cb1decSSalil Mehta 	u32 clearval;
2032e2cb1decSSalil Mehta 
2033e2cb1decSSalil Mehta 	hclgevf_enable_vector(&hdev->misc_vector, false);
2034b90fcc5bSHuazhong Tan 	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
2035427900d2SJiaran Zhang 	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
2036427900d2SJiaran Zhang 		hclgevf_clear_event_cause(hdev, clearval);
2037e2cb1decSSalil Mehta 
2038b90fcc5bSHuazhong Tan 	switch (event_cause) {
2039b90fcc5bSHuazhong Tan 	case HCLGEVF_VECTOR0_EVENT_RST:
2040b90fcc5bSHuazhong Tan 		hclgevf_reset_task_schedule(hdev);
2041b90fcc5bSHuazhong Tan 		break;
2042b90fcc5bSHuazhong Tan 	case HCLGEVF_VECTOR0_EVENT_MBX:
204307a0556aSSalil Mehta 		hclgevf_mbx_handler(hdev);
2044b90fcc5bSHuazhong Tan 		break;
2045b90fcc5bSHuazhong Tan 	default:
2046b90fcc5bSHuazhong Tan 		break;
2047b90fcc5bSHuazhong Tan 	}
2048e2cb1decSSalil Mehta 
2049e2cb1decSSalil Mehta 	hclgevf_enable_vector(&hdev->misc_vector, true);
2050e2cb1decSSalil Mehta 
2051e2cb1decSSalil Mehta 	return IRQ_HANDLED;
2052e2cb1decSSalil Mehta }
2053e2cb1decSSalil Mehta 
2054e2cb1decSSalil Mehta static int hclgevf_configure(struct hclgevf_dev *hdev)
2055e2cb1decSSalil Mehta {
2056e2cb1decSSalil Mehta 	int ret;
2057e2cb1decSSalil Mehta 
20583462207dSYufeng Mo 	hdev->gro_en = true;
20593462207dSYufeng Mo 
206032e6d104SJian Shen 	ret = hclgevf_get_basic_info(hdev);
206132e6d104SJian Shen 	if (ret)
206232e6d104SJian Shen 		return ret;
206332e6d104SJian Shen 
206492f11ea1SJian Shen 	/* get current port based vlan state from PF */
206592f11ea1SJian Shen 	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
206692f11ea1SJian Shen 	if (ret)
206792f11ea1SJian Shen 		return ret;
206892f11ea1SJian Shen 
2069e2cb1decSSalil Mehta 	/* get queue configuration from PF */
20706cee6fc3SJian Shen 	ret = hclgevf_get_queue_info(hdev);
2071e2cb1decSSalil Mehta 	if (ret)
2072e2cb1decSSalil Mehta 		return ret;
2073c0425944SPeng Li 
2074c0425944SPeng Li 	/* get queue depth info from PF */
2075c0425944SPeng Li 	ret = hclgevf_get_queue_depth(hdev);
2076c0425944SPeng Li 	if (ret)
2077c0425944SPeng Li 		return ret;
2078c0425944SPeng Li 
207932e6d104SJian Shen 	return hclgevf_get_pf_media_type(hdev);
2080e2cb1decSSalil Mehta }
2081e2cb1decSSalil Mehta 
20827a01c897SSalil Mehta static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
20837a01c897SSalil Mehta {
20847a01c897SSalil Mehta 	struct pci_dev *pdev = ae_dev->pdev;
20851154bb26SPeng Li 	struct hclgevf_dev *hdev;
20867a01c897SSalil Mehta 
20877a01c897SSalil Mehta 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
20887a01c897SSalil Mehta 	if (!hdev)
20897a01c897SSalil Mehta 		return -ENOMEM;
20907a01c897SSalil Mehta 
20917a01c897SSalil Mehta 	hdev->pdev = pdev;
20927a01c897SSalil Mehta 	hdev->ae_dev = ae_dev;
20937a01c897SSalil Mehta 	ae_dev->priv = hdev;
20947a01c897SSalil Mehta 
20957a01c897SSalil Mehta 	return 0;
20967a01c897SSalil Mehta }
20977a01c897SSalil Mehta 
2098e2cb1decSSalil Mehta static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
2099e2cb1decSSalil Mehta {
2100e2cb1decSSalil Mehta 	struct hnae3_handle *roce = &hdev->roce;
2101e2cb1decSSalil Mehta 	struct hnae3_handle *nic = &hdev->nic;
2102e2cb1decSSalil Mehta 
210307acf909SJian Shen 	roce->rinfo.num_vectors = hdev->num_roce_msix;
2104e2cb1decSSalil Mehta 
2105e2cb1decSSalil Mehta 	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
2106e2cb1decSSalil Mehta 	    hdev->num_msi_left == 0)
2107e2cb1decSSalil Mehta 		return -EINVAL;
2108e2cb1decSSalil Mehta 
2109beb27ca4SJie Wang 	roce->rinfo.base_vector = hdev->roce_base_msix_offset;
2110e2cb1decSSalil Mehta 
2111e2cb1decSSalil Mehta 	roce->rinfo.netdev = nic->kinfo.netdev;
2112076bb537SJie Wang 	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2113076bb537SJie Wang 	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2114e2cb1decSSalil Mehta 
2115e2cb1decSSalil Mehta 	roce->pdev = nic->pdev;
2116e2cb1decSSalil Mehta 	roce->ae_algo = nic->ae_algo;
2117e2cb1decSSalil Mehta 	roce->numa_node_mask = nic->numa_node_mask;
2118e2cb1decSSalil Mehta 
2119e2cb1decSSalil Mehta 	return 0;
2120e2cb1decSSalil Mehta }
2121e2cb1decSSalil Mehta 
21223462207dSYufeng Mo static int hclgevf_config_gro(struct hclgevf_dev *hdev)
2123b26a6feaSPeng Li {
2124b26a6feaSPeng Li 	struct hclgevf_cfg_gro_status_cmd *req;
21256befad60SJie Wang 	struct hclge_desc desc;
2126b26a6feaSPeng Li 	int ret;
2127b26a6feaSPeng Li 
2128507e46aeSGuangbin Huang 	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
2129b26a6feaSPeng Li 		return 0;
2130b26a6feaSPeng Li 
213143710bfeSJie Wang 	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
2132b26a6feaSPeng Li 				     false);
2133b26a6feaSPeng Li 	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
2134b26a6feaSPeng Li 
21353462207dSYufeng Mo 	req->gro_en = hdev->gro_en ? 1 : 0;
2136b26a6feaSPeng Li 
2137b26a6feaSPeng Li 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2138b26a6feaSPeng Li 	if (ret)
2139b26a6feaSPeng Li 		dev_err(&hdev->pdev->dev,
2140b26a6feaSPeng Li 			"VF GRO hardware config cmd failed, ret = %d.\n", ret);
2141b26a6feaSPeng Li 
2142b26a6feaSPeng Li 	return ret;
2143b26a6feaSPeng Li }
2144b26a6feaSPeng Li 
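/* Program the RSS hash key and input tuple (on HW revision V2 and later),
 * the indirection table and the TC mode using the cached rss_cfg and
 * hw_tc_map.
 */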
2145944de484SGuojia Liao static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
2146944de484SGuojia Liao {
2147027733b1SJie Wang 	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
214893969dc1SJie Wang 	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
214993969dc1SJie Wang 	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
215093969dc1SJie Wang 	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
2151944de484SGuojia Liao 	int ret;
2152944de484SGuojia Liao 
2153295ba232SGuangbin Huang 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
21547428d6c9SJie Wang 		ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
21557428d6c9SJie Wang 						  rss_cfg->rss_algo,
2156944de484SGuojia Liao 						  rss_cfg->rss_hash_key);
2157944de484SGuojia Liao 		if (ret)
2158944de484SGuojia Liao 			return ret;
2159944de484SGuojia Liao 
21607428d6c9SJie Wang 		ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw,
21617428d6c9SJie Wang 						     false, rss_cfg);
2162944de484SGuojia Liao 		if (ret)
2163944de484SGuojia Liao 			return ret;
2164944de484SGuojia Liao 	}
2165e2cb1decSSalil Mehta 
21667428d6c9SJie Wang 	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
21677428d6c9SJie Wang 					     rss_cfg->rss_indirection_tbl);
2168e2cb1decSSalil Mehta 	if (ret)
2169e2cb1decSSalil Mehta 		return ret;
2170e2cb1decSSalil Mehta 
217193969dc1SJie Wang 	hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
217293969dc1SJie Wang 				   tc_offset, tc_valid, tc_size);
217393969dc1SJie Wang 
217493969dc1SJie Wang 	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
217593969dc1SJie Wang 					  tc_valid, tc_size);
2176e2cb1decSSalil Mehta }
2177e2cb1decSSalil Mehta 
2178e2cb1decSSalil Mehta static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
2179e2cb1decSSalil Mehta {
2180bbfd4506SJian Shen 	struct hnae3_handle *nic = &hdev->nic;
2181bbfd4506SJian Shen 	int ret;
2182bbfd4506SJian Shen 
2183bbfd4506SJian Shen 	ret = hclgevf_en_hw_strip_rxvtag(nic, true);
2184bbfd4506SJian Shen 	if (ret) {
2185bbfd4506SJian Shen 		dev_err(&hdev->pdev->dev,
2186bbfd4506SJian Shen 			"failed to enable rx vlan offload, ret = %d\n", ret);
2187bbfd4506SJian Shen 		return ret;
2188bbfd4506SJian Shen 	}
2189bbfd4506SJian Shen 
2190e2cb1decSSalil Mehta 	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
2191e2cb1decSSalil Mehta 				       false);
2192e2cb1decSSalil Mehta }
2193e2cb1decSSalil Mehta 
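/* Busy-wait (bounded by HCLGEVF_FLUSH_LINK_TIMEOUT iterations) for an
 * in-flight link update in the service task to finish.
 */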
2194ff200099SYunsheng Lin static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
2195ff200099SYunsheng Lin {
2196ff200099SYunsheng Lin #define HCLGEVF_FLUSH_LINK_TIMEOUT	100000
2197ff200099SYunsheng Lin 
2198ff200099SYunsheng Lin 	unsigned long last = hdev->serv_processed_cnt;
2199ff200099SYunsheng Lin 	int i = 0;
2200ff200099SYunsheng Lin 
2201ff200099SYunsheng Lin 	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
2202ff200099SYunsheng Lin 	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
2203ff200099SYunsheng Lin 	       last == hdev->serv_processed_cnt)
2204ff200099SYunsheng Lin 		usleep_range(1, 1);
2205ff200099SYunsheng Lin }
2206ff200099SYunsheng Lin 
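/* Enable or disable service task scheduling. Disabling marks the device
 * DOWN and waits for any in-flight link update so the service task cannot
 * race with the stop path.
 */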
22078cdb992fSJian Shen static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
22088cdb992fSJian Shen {
22098cdb992fSJian Shen 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
22108cdb992fSJian Shen 
22118cdb992fSJian Shen 	if (enable) {
2212ff200099SYunsheng Lin 		hclgevf_task_schedule(hdev, 0);
22138cdb992fSJian Shen 	} else {
2214b3c3fe8eSYunsheng Lin 		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2215ff200099SYunsheng Lin 
2216ff200099SYunsheng Lin 		/* flush memory to make sure DOWN is seen by service task */
2217ff200099SYunsheng Lin 		smp_mb__before_atomic();
2218ff200099SYunsheng Lin 		hclgevf_flush_link_update(hdev);
22198cdb992fSJian Shen 	}
22208cdb992fSJian Shen }
22218cdb992fSJian Shen 
2222e2cb1decSSalil Mehta static int hclgevf_ae_start(struct hnae3_handle *handle)
2223e2cb1decSSalil Mehta {
2224e2cb1decSSalil Mehta 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2225e2cb1decSSalil Mehta 
2226ed7bedd2SGuangbin Huang 	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
222701305e16SGuangbin Huang 	clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
2228ed7bedd2SGuangbin Huang 
22294afc310cSJie Wang 	hclge_comm_reset_tqp_stats(handle);
2230e2cb1decSSalil Mehta 
2231e2cb1decSSalil Mehta 	hclgevf_request_link_info(hdev);
2232e2cb1decSSalil Mehta 
22339194d18bSliuzhongzhu 	hclgevf_update_link_mode(hdev);
22349194d18bSliuzhongzhu 
2235e2cb1decSSalil Mehta 	return 0;
2236e2cb1decSSalil Mehta }
2237e2cb1decSSalil Mehta 
2238e2cb1decSSalil Mehta static void hclgevf_ae_stop(struct hnae3_handle *handle)
2239e2cb1decSSalil Mehta {
2240e2cb1decSSalil Mehta 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2241e2cb1decSSalil Mehta 
22422f7e4896SFuyun Liang 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
22432f7e4896SFuyun Liang 
2244146e92c1SHuazhong Tan 	if (hdev->reset_type != HNAE3_VF_RESET)
22458fa86551SYufeng Mo 		hclgevf_reset_tqp(handle);
224639cfbc9cSHuazhong Tan 
22474afc310cSJie Wang 	hclge_comm_reset_tqp_stats(handle);
22488cc6c1f7SFuyun Liang 	hclgevf_update_link_status(hdev, 0);
2249e2cb1decSSalil Mehta }
2250e2cb1decSSalil Mehta 
2251a6d818e3SYunsheng Lin static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
2252a6d818e3SYunsheng Lin {
2253d3410018SYufeng Mo #define HCLGEVF_STATE_ALIVE	1
2254d3410018SYufeng Mo #define HCLGEVF_STATE_NOT_ALIVE	0
2255a6d818e3SYunsheng Lin 
2256d3410018SYufeng Mo 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2257d3410018SYufeng Mo 	struct hclge_vf_to_pf_msg send_msg;
2258d3410018SYufeng Mo 
2259d3410018SYufeng Mo 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
2260d3410018SYufeng Mo 	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
2261d3410018SYufeng Mo 				HCLGEVF_STATE_NOT_ALIVE;
2262d3410018SYufeng Mo 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2263a6d818e3SYunsheng Lin }
2264a6d818e3SYunsheng Lin 
2265a6d818e3SYunsheng Lin static int hclgevf_client_start(struct hnae3_handle *handle)
2266a6d818e3SYunsheng Lin {
2267f621df96SQinglang Miao 	return hclgevf_set_alive(handle, true);
2268a6d818e3SYunsheng Lin }
2269a6d818e3SYunsheng Lin 
2270a6d818e3SYunsheng Lin static void hclgevf_client_stop(struct hnae3_handle *handle)
2271a6d818e3SYunsheng Lin {
2272a6d818e3SYunsheng Lin 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2273a6d818e3SYunsheng Lin 	int ret;
2274a6d818e3SYunsheng Lin 
2275a6d818e3SYunsheng Lin 	ret = hclgevf_set_alive(handle, false);
2276a6d818e3SYunsheng Lin 	if (ret)
2277a6d818e3SYunsheng Lin 		dev_warn(&hdev->pdev->dev,
2278a6d818e3SYunsheng Lin 			 "%s failed %d\n", __func__, ret);
2279a6d818e3SYunsheng Lin }
2280a6d818e3SYunsheng Lin 
2281e2cb1decSSalil Mehta static void hclgevf_state_init(struct hclgevf_dev *hdev)
2282e2cb1decSSalil Mehta {
2283e2cb1decSSalil Mehta 	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
2284e2cb1decSSalil Mehta 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2285d5432455SGuojia Liao 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
2286e2cb1decSSalil Mehta 
2287b3c3fe8eSYunsheng Lin 	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
228835a1e503SSalil Mehta 
2289e2cb1decSSalil Mehta 	mutex_init(&hdev->mbx_resp.mbx_mutex);
2290f28368bbSHuazhong Tan 	sema_init(&hdev->reset_sem, 1);
2291e2cb1decSSalil Mehta 
2292ee4bcd3bSJian Shen 	spin_lock_init(&hdev->mac_table.mac_list_lock);
2293ee4bcd3bSJian Shen 	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
2294ee4bcd3bSJian Shen 	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
2295ee4bcd3bSJian Shen 
2296e2cb1decSSalil Mehta 	/* bring the device down */
2297e2cb1decSSalil Mehta 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2298e2cb1decSSalil Mehta }
2299e2cb1decSSalil Mehta 
2300e2cb1decSSalil Mehta static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
2301e2cb1decSSalil Mehta {
2302e2cb1decSSalil Mehta 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2303acfc3d55SHuazhong Tan 	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
2304e2cb1decSSalil Mehta 
2305b3c3fe8eSYunsheng Lin 	if (hdev->service_task.work.func)
2306b3c3fe8eSYunsheng Lin 		cancel_delayed_work_sync(&hdev->service_task);
2307e2cb1decSSalil Mehta 
2308e2cb1decSSalil Mehta 	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
2309e2cb1decSSalil Mehta }
2310e2cb1decSSalil Mehta 
2311e2cb1decSSalil Mehta static int hclgevf_init_msi(struct hclgevf_dev *hdev)
2312e2cb1decSSalil Mehta {
2313e2cb1decSSalil Mehta 	struct pci_dev *pdev = hdev->pdev;
2314e2cb1decSSalil Mehta 	int vectors;
2315e2cb1decSSalil Mehta 	int i;
2316e2cb1decSSalil Mehta 
2317580a05f9SYonglong Liu 	if (hnae3_dev_roce_supported(hdev))
231807acf909SJian Shen 		vectors = pci_alloc_irq_vectors(pdev,
231907acf909SJian Shen 						hdev->roce_base_msix_offset + 1,
232007acf909SJian Shen 						hdev->num_msi,
232107acf909SJian Shen 						PCI_IRQ_MSIX);
232207acf909SJian Shen 	else
2323580a05f9SYonglong Liu 		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2324580a05f9SYonglong Liu 						hdev->num_msi,
2325e2cb1decSSalil Mehta 						PCI_IRQ_MSI | PCI_IRQ_MSIX);
232607acf909SJian Shen 
2327e2cb1decSSalil Mehta 	if (vectors < 0) {
2328e2cb1decSSalil Mehta 		dev_err(&pdev->dev,
2329e2cb1decSSalil Mehta 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2330e2cb1decSSalil Mehta 			vectors);
2331e2cb1decSSalil Mehta 		return vectors;
2332e2cb1decSSalil Mehta 	}
2333e2cb1decSSalil Mehta 	if (vectors < hdev->num_msi)
2334e2cb1decSSalil Mehta 		dev_warn(&hdev->pdev->dev,
2335adcf738bSGuojia Liao 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2336e2cb1decSSalil Mehta 			 hdev->num_msi, vectors);
2337e2cb1decSSalil Mehta 
2338e2cb1decSSalil Mehta 	hdev->num_msi = vectors;
2339e2cb1decSSalil Mehta 	hdev->num_msi_left = vectors;
2340580a05f9SYonglong Liu 
2341e2cb1decSSalil Mehta 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2342e2cb1decSSalil Mehta 					   sizeof(u16), GFP_KERNEL);
2343e2cb1decSSalil Mehta 	if (!hdev->vector_status) {
2344e2cb1decSSalil Mehta 		pci_free_irq_vectors(pdev);
2345e2cb1decSSalil Mehta 		return -ENOMEM;
2346e2cb1decSSalil Mehta 	}
2347e2cb1decSSalil Mehta 
2348e2cb1decSSalil Mehta 	for (i = 0; i < hdev->num_msi; i++)
2349e2cb1decSSalil Mehta 		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
2350e2cb1decSSalil Mehta 
2351e2cb1decSSalil Mehta 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2352e2cb1decSSalil Mehta 					sizeof(int), GFP_KERNEL);
2353e2cb1decSSalil Mehta 	if (!hdev->vector_irq) {
2354862d969aSHuazhong Tan 		devm_kfree(&pdev->dev, hdev->vector_status);
2355e2cb1decSSalil Mehta 		pci_free_irq_vectors(pdev);
2356e2cb1decSSalil Mehta 		return -ENOMEM;
2357e2cb1decSSalil Mehta 	}
2358e2cb1decSSalil Mehta 
2359e2cb1decSSalil Mehta 	return 0;
2360e2cb1decSSalil Mehta }
2361e2cb1decSSalil Mehta 
2362e2cb1decSSalil Mehta static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
2363e2cb1decSSalil Mehta {
2364e2cb1decSSalil Mehta 	struct pci_dev *pdev = hdev->pdev;
2365e2cb1decSSalil Mehta 
2366862d969aSHuazhong Tan 	devm_kfree(&pdev->dev, hdev->vector_status);
2367862d969aSHuazhong Tan 	devm_kfree(&pdev->dev, hdev->vector_irq);
2368e2cb1decSSalil Mehta 	pci_free_irq_vectors(pdev);
2369e2cb1decSSalil Mehta }
2370e2cb1decSSalil Mehta 
2371e2cb1decSSalil Mehta static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
2372e2cb1decSSalil Mehta {
2373cdd332acSGuojia Liao 	int ret;
2374e2cb1decSSalil Mehta 
2375e2cb1decSSalil Mehta 	hclgevf_get_misc_vector(hdev);
2376e2cb1decSSalil Mehta 
2377f97c4d82SYonglong Liu 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
2378f97c4d82SYonglong Liu 		 HCLGEVF_NAME, pci_name(hdev->pdev));
2379e2cb1decSSalil Mehta 	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
2380f97c4d82SYonglong Liu 			  0, hdev->misc_vector.name, hdev);
2381e2cb1decSSalil Mehta 	if (ret) {
2382e2cb1decSSalil Mehta 		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
2383e2cb1decSSalil Mehta 			hdev->misc_vector.vector_irq);
2384e2cb1decSSalil Mehta 		return ret;
2385e2cb1decSSalil Mehta 	}
2386e2cb1decSSalil Mehta 
23871819e409SXi Wang 	hclgevf_clear_event_cause(hdev, 0);
23881819e409SXi Wang 
2389e2cb1decSSalil Mehta 	/* enable misc. vector(vector 0) */
2390e2cb1decSSalil Mehta 	hclgevf_enable_vector(&hdev->misc_vector, true);
2391e2cb1decSSalil Mehta 
2392e2cb1decSSalil Mehta 	return ret;
2393e2cb1decSSalil Mehta }
2394e2cb1decSSalil Mehta 
2395e2cb1decSSalil Mehta static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
2396e2cb1decSSalil Mehta {
2397e2cb1decSSalil Mehta 	/* disable misc vector(vector 0) */
2398e2cb1decSSalil Mehta 	hclgevf_enable_vector(&hdev->misc_vector, false);
23991819e409SXi Wang 	synchronize_irq(hdev->misc_vector.vector_irq);
2400e2cb1decSSalil Mehta 	free_irq(hdev->misc_vector.vector_irq, hdev);
2401e2cb1decSSalil Mehta 	hclgevf_free_vector(hdev, 0);
2402e2cb1decSSalil Mehta }
2403e2cb1decSSalil Mehta 
2404bb87be87SYonglong Liu static void hclgevf_info_show(struct hclgevf_dev *hdev)
2405bb87be87SYonglong Liu {
2406bb87be87SYonglong Liu 	struct device *dev = &hdev->pdev->dev;
2407bb87be87SYonglong Liu 
2408bb87be87SYonglong Liu 	dev_info(dev, "VF info begin:\n");
2409bb87be87SYonglong Liu 
2410adcf738bSGuojia Liao 	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
2411adcf738bSGuojia Liao 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
2412adcf738bSGuojia Liao 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
2413adcf738bSGuojia Liao 	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
2414adcf738bSGuojia Liao 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
2415adcf738bSGuojia Liao 	dev_info(dev, "PF media type of this VF: %u\n",
2416bb87be87SYonglong Liu 		 hdev->hw.mac.media_type);
2417bb87be87SYonglong Liu 
2418bb87be87SYonglong Liu 	dev_info(dev, "VF info end.\n");
2419bb87be87SYonglong Liu }
2420bb87be87SYonglong Liu 
24211db58f86SHuazhong Tan static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
24221db58f86SHuazhong Tan 					    struct hnae3_client *client)
24231db58f86SHuazhong Tan {
24241db58f86SHuazhong Tan 	struct hclgevf_dev *hdev = ae_dev->priv;
24254cd5beaaSGuangbin Huang 	int rst_cnt = hdev->rst_stats.rst_cnt;
24261db58f86SHuazhong Tan 	int ret;
24271db58f86SHuazhong Tan 
24281db58f86SHuazhong Tan 	ret = client->ops->init_instance(&hdev->nic);
24291db58f86SHuazhong Tan 	if (ret)
24301db58f86SHuazhong Tan 		return ret;
24311db58f86SHuazhong Tan 
24321db58f86SHuazhong Tan 	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
24334cd5beaaSGuangbin Huang 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
24344cd5beaaSGuangbin Huang 	    rst_cnt != hdev->rst_stats.rst_cnt) {
24354cd5beaaSGuangbin Huang 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
24364cd5beaaSGuangbin Huang 
24374cd5beaaSGuangbin Huang 		client->ops->uninit_instance(&hdev->nic, 0);
24384cd5beaaSGuangbin Huang 		return -EBUSY;
24394cd5beaaSGuangbin Huang 	}
24404cd5beaaSGuangbin Huang 
24411db58f86SHuazhong Tan 	hnae3_set_client_init_flag(client, ae_dev, 1);
24421db58f86SHuazhong Tan 
24431db58f86SHuazhong Tan 	if (netif_msg_drv(&hdev->nic))
24441db58f86SHuazhong Tan 		hclgevf_info_show(hdev);
24451db58f86SHuazhong Tan 
24461db58f86SHuazhong Tan 	return 0;
24471db58f86SHuazhong Tan }
24481db58f86SHuazhong Tan 
24491db58f86SHuazhong Tan static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
24501db58f86SHuazhong Tan 					     struct hnae3_client *client)
24511db58f86SHuazhong Tan {
24521db58f86SHuazhong Tan 	struct hclgevf_dev *hdev = ae_dev->priv;
24531db58f86SHuazhong Tan 	int ret;
24541db58f86SHuazhong Tan 
24551db58f86SHuazhong Tan 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
24561db58f86SHuazhong Tan 	    !hdev->nic_client)
24571db58f86SHuazhong Tan 		return 0;
24581db58f86SHuazhong Tan 
24591db58f86SHuazhong Tan 	ret = hclgevf_init_roce_base_info(hdev);
24601db58f86SHuazhong Tan 	if (ret)
24611db58f86SHuazhong Tan 		return ret;
24621db58f86SHuazhong Tan 
24631db58f86SHuazhong Tan 	ret = client->ops->init_instance(&hdev->roce);
24641db58f86SHuazhong Tan 	if (ret)
24651db58f86SHuazhong Tan 		return ret;
24661db58f86SHuazhong Tan 
2467fe735c84SHuazhong Tan 	set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
24681db58f86SHuazhong Tan 	hnae3_set_client_init_flag(client, ae_dev, 1);
24691db58f86SHuazhong Tan 
24701db58f86SHuazhong Tan 	return 0;
24711db58f86SHuazhong Tan }
24721db58f86SHuazhong Tan 
2473e718a93fSPeng Li static int hclgevf_init_client_instance(struct hnae3_client *client,
2474e718a93fSPeng Li 					struct hnae3_ae_dev *ae_dev)
2475e2cb1decSSalil Mehta {
2476e718a93fSPeng Li 	struct hclgevf_dev *hdev = ae_dev->priv;
2477e2cb1decSSalil Mehta 	int ret;
2478e2cb1decSSalil Mehta 
2479e2cb1decSSalil Mehta 	switch (client->type) {
2480e2cb1decSSalil Mehta 	case HNAE3_CLIENT_KNIC:
2481e2cb1decSSalil Mehta 		hdev->nic_client = client;
2482e2cb1decSSalil Mehta 		hdev->nic.client = client;
2483e2cb1decSSalil Mehta 
24841db58f86SHuazhong Tan 		ret = hclgevf_init_nic_client_instance(ae_dev, client);
2485e2cb1decSSalil Mehta 		if (ret)
248649dd8054SJian Shen 			goto clear_nic;
2487e2cb1decSSalil Mehta 
24881db58f86SHuazhong Tan 		ret = hclgevf_init_roce_client_instance(ae_dev,
24891db58f86SHuazhong Tan 							hdev->roce_client);
2490e2cb1decSSalil Mehta 		if (ret)
249149dd8054SJian Shen 			goto clear_roce;
2492d9f28fc2SJian Shen 
2493e2cb1decSSalil Mehta 		break;
2494e2cb1decSSalil Mehta 	case HNAE3_CLIENT_ROCE:
2495544a7bcdSLijun Ou 		if (hnae3_dev_roce_supported(hdev)) {
2496e2cb1decSSalil Mehta 			hdev->roce_client = client;
2497e2cb1decSSalil Mehta 			hdev->roce.client = client;
2498544a7bcdSLijun Ou 		}
2499e2cb1decSSalil Mehta 
25001db58f86SHuazhong Tan 		ret = hclgevf_init_roce_client_instance(ae_dev, client);
2501e2cb1decSSalil Mehta 		if (ret)
250249dd8054SJian Shen 			goto clear_roce;
2503e2cb1decSSalil Mehta 
2504fa7a4bd5SJian Shen 		break;
2505fa7a4bd5SJian Shen 	default:
2506fa7a4bd5SJian Shen 		return -EINVAL;
2507e2cb1decSSalil Mehta 	}
2508e2cb1decSSalil Mehta 
2509e2cb1decSSalil Mehta 	return 0;
251049dd8054SJian Shen 
251149dd8054SJian Shen clear_nic:
251249dd8054SJian Shen 	hdev->nic_client = NULL;
251349dd8054SJian Shen 	hdev->nic.client = NULL;
251449dd8054SJian Shen 	return ret;
251549dd8054SJian Shen clear_roce:
251649dd8054SJian Shen 	hdev->roce_client = NULL;
251749dd8054SJian Shen 	hdev->roce.client = NULL;
251849dd8054SJian Shen 	return ret;
2519e2cb1decSSalil Mehta }
2520e2cb1decSSalil Mehta 
2521e718a93fSPeng Li static void hclgevf_uninit_client_instance(struct hnae3_client *client,
2522e718a93fSPeng Li 					   struct hnae3_ae_dev *ae_dev)
2523e2cb1decSSalil Mehta {
2524e718a93fSPeng Li 	struct hclgevf_dev *hdev = ae_dev->priv;
2525e718a93fSPeng Li 
2526e2cb1decSSalil Mehta 	/* un-init roce, if it exists */
252749dd8054SJian Shen 	if (hdev->roce_client) {
2528e140c798SYufeng Mo 		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2529e140c798SYufeng Mo 			msleep(HCLGEVF_WAIT_RESET_DONE);
2530fe735c84SHuazhong Tan 		clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2531e140c798SYufeng Mo 
2532e2cb1decSSalil Mehta 		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
253349dd8054SJian Shen 		hdev->roce_client = NULL;
253449dd8054SJian Shen 		hdev->roce.client = NULL;
253549dd8054SJian Shen 	}
2536e2cb1decSSalil Mehta 
2537e2cb1decSSalil Mehta 	/* un-init nic/unic, if this was not called by roce client */
253849dd8054SJian Shen 	if (client->ops->uninit_instance && hdev->nic_client &&
253949dd8054SJian Shen 	    client->type != HNAE3_CLIENT_ROCE) {
2540e140c798SYufeng Mo 		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2541e140c798SYufeng Mo 			msleep(HCLGEVF_WAIT_RESET_DONE);
254225d1817cSHuazhong Tan 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
254325d1817cSHuazhong Tan 
2544e2cb1decSSalil Mehta 		client->ops->uninit_instance(&hdev->nic, 0);
254549dd8054SJian Shen 		hdev->nic_client = NULL;
254649dd8054SJian Shen 		hdev->nic.client = NULL;
254749dd8054SJian Shen 	}
2548e2cb1decSSalil Mehta }
2549e2cb1decSSalil Mehta 
255030ae7f8aSHuazhong Tan static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
255130ae7f8aSHuazhong Tan {
255230ae7f8aSHuazhong Tan 	struct pci_dev *pdev = hdev->pdev;
255330ae7f8aSHuazhong Tan 	struct hclgevf_hw *hw = &hdev->hw;
255430ae7f8aSHuazhong Tan 
255530ae7f8aSHuazhong Tan 	/* for devices that do not have device memory, return directly */
255630ae7f8aSHuazhong Tan 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
255730ae7f8aSHuazhong Tan 		return 0;
255830ae7f8aSHuazhong Tan 
2559076bb537SJie Wang 	hw->hw.mem_base =
2560076bb537SJie Wang 		devm_ioremap_wc(&pdev->dev,
2561076bb537SJie Wang 				pci_resource_start(pdev, HCLGEVF_MEM_BAR),
256230ae7f8aSHuazhong Tan 				pci_resource_len(pdev, HCLGEVF_MEM_BAR));
2563076bb537SJie Wang 	if (!hw->hw.mem_base) {
2564be419fcaSColin Ian King 		dev_err(&pdev->dev, "failed to map device memory\n");
256530ae7f8aSHuazhong Tan 		return -EFAULT;
256630ae7f8aSHuazhong Tan 	}
256730ae7f8aSHuazhong Tan 
256830ae7f8aSHuazhong Tan 	return 0;
256930ae7f8aSHuazhong Tan }
257030ae7f8aSHuazhong Tan 
2571e2cb1decSSalil Mehta static int hclgevf_pci_init(struct hclgevf_dev *hdev)
2572e2cb1decSSalil Mehta {
2573e2cb1decSSalil Mehta 	struct pci_dev *pdev = hdev->pdev;
2574e2cb1decSSalil Mehta 	struct hclgevf_hw *hw;
2575e2cb1decSSalil Mehta 	int ret;
2576e2cb1decSSalil Mehta 
2577e2cb1decSSalil Mehta 	ret = pci_enable_device(pdev);
2578e2cb1decSSalil Mehta 	if (ret) {
2579e2cb1decSSalil Mehta 		dev_err(&pdev->dev, "failed to enable PCI device\n");
25803e249d3bSFuyun Liang 		return ret;
2581e2cb1decSSalil Mehta 	}
2582e2cb1decSSalil Mehta 
2583e2cb1decSSalil Mehta 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2584e2cb1decSSalil Mehta 	if (ret) {
2585e2cb1decSSalil Mehta 		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
2586e2cb1decSSalil Mehta 		goto err_disable_device;
2587e2cb1decSSalil Mehta 	}
2588e2cb1decSSalil Mehta 
2589e2cb1decSSalil Mehta 	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
2590e2cb1decSSalil Mehta 	if (ret) {
2591e2cb1decSSalil Mehta 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
2592e2cb1decSSalil Mehta 		goto err_disable_device;
2593e2cb1decSSalil Mehta 	}
2594e2cb1decSSalil Mehta 
2595e2cb1decSSalil Mehta 	pci_set_master(pdev);
2596e2cb1decSSalil Mehta 	hw = &hdev->hw;
2597076bb537SJie Wang 	hw->hw.io_base = pci_iomap(pdev, 2, 0);
2598076bb537SJie Wang 	if (!hw->hw.io_base) {
2599e2cb1decSSalil Mehta 		dev_err(&pdev->dev, "can't map configuration register space\n");
2600e2cb1decSSalil Mehta 		ret = -ENOMEM;
2601e2cb1decSSalil Mehta 		goto err_clr_master;
2602e2cb1decSSalil Mehta 	}
2603e2cb1decSSalil Mehta 
260430ae7f8aSHuazhong Tan 	ret = hclgevf_dev_mem_map(hdev);
260530ae7f8aSHuazhong Tan 	if (ret)
260630ae7f8aSHuazhong Tan 		goto err_unmap_io_base;
260730ae7f8aSHuazhong Tan 
2608e2cb1decSSalil Mehta 	return 0;
2609e2cb1decSSalil Mehta 
261030ae7f8aSHuazhong Tan err_unmap_io_base:
2611076bb537SJie Wang 	pci_iounmap(pdev, hdev->hw.hw.io_base);
2612e2cb1decSSalil Mehta err_clr_master:
2613e2cb1decSSalil Mehta 	pci_clear_master(pdev);
2614e2cb1decSSalil Mehta 	pci_release_regions(pdev);
2615e2cb1decSSalil Mehta err_disable_device:
2616e2cb1decSSalil Mehta 	pci_disable_device(pdev);
26173e249d3bSFuyun Liang 
2618e2cb1decSSalil Mehta 	return ret;
2619e2cb1decSSalil Mehta }
2620e2cb1decSSalil Mehta 
2621e2cb1decSSalil Mehta static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
2622e2cb1decSSalil Mehta {
2623e2cb1decSSalil Mehta 	struct pci_dev *pdev = hdev->pdev;
2624e2cb1decSSalil Mehta 
2625076bb537SJie Wang 	if (hdev->hw.hw.mem_base)
2626076bb537SJie Wang 		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
262730ae7f8aSHuazhong Tan 
2628076bb537SJie Wang 	pci_iounmap(pdev, hdev->hw.hw.io_base);
2629e2cb1decSSalil Mehta 	pci_clear_master(pdev);
2630e2cb1decSSalil Mehta 	pci_release_regions(pdev);
2631e2cb1decSSalil Mehta 	pci_disable_device(pdev);
2632e2cb1decSSalil Mehta }
2633e2cb1decSSalil Mehta 
263407acf909SJian Shen static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
263507acf909SJian Shen {
263607acf909SJian Shen 	struct hclgevf_query_res_cmd *req;
26376befad60SJie Wang 	struct hclge_desc desc;
263807acf909SJian Shen 	int ret;
263907acf909SJian Shen 
264043710bfeSJie Wang 	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
264107acf909SJian Shen 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
264207acf909SJian Shen 	if (ret) {
264307acf909SJian Shen 		dev_err(&hdev->pdev->dev,
264407acf909SJian Shen 			"query vf resource failed, ret = %d.\n", ret);
264507acf909SJian Shen 		return ret;
264607acf909SJian Shen 	}
264707acf909SJian Shen 
264807acf909SJian Shen 	req = (struct hclgevf_query_res_cmd *)desc.data;
264907acf909SJian Shen 
2650580a05f9SYonglong Liu 	if (hnae3_dev_roce_supported(hdev)) {
265107acf909SJian Shen 		hdev->roce_base_msix_offset =
265260df7e91SHuazhong Tan 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
265307acf909SJian Shen 				HCLGEVF_MSIX_OFT_ROCEE_M,
265407acf909SJian Shen 				HCLGEVF_MSIX_OFT_ROCEE_S);
265507acf909SJian Shen 		hdev->num_roce_msix =
265660df7e91SHuazhong Tan 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
265707acf909SJian Shen 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
265807acf909SJian Shen 
2659580a05f9SYonglong Liu 		/* the number of nic msix vectors always equals the roce's. */
2660580a05f9SYonglong Liu 		hdev->num_nic_msix = hdev->num_roce_msix;
2661580a05f9SYonglong Liu 
266207acf909SJian Shen 		/* A VF should have both NIC and Roce vectors; NIC vectors
266307acf909SJian Shen 		 * are queued before Roce vectors. The offset is fixed to 64.
266407acf909SJian Shen 		 */
266507acf909SJian Shen 		hdev->num_msi = hdev->num_roce_msix +
266607acf909SJian Shen 				hdev->roce_base_msix_offset;
266707acf909SJian Shen 	} else {
266807acf909SJian Shen 		hdev->num_msi =
266960df7e91SHuazhong Tan 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
267007acf909SJian Shen 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2671580a05f9SYonglong Liu 
2672580a05f9SYonglong Liu 		hdev->num_nic_msix = hdev->num_msi;
2673580a05f9SYonglong Liu 	}
2674580a05f9SYonglong Liu 
2675580a05f9SYonglong Liu 	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
2676580a05f9SYonglong Liu 		dev_err(&hdev->pdev->dev,
2677580a05f9SYonglong Liu 			"Just %u msi resources, not enough for vf(min:2).\n",
2678580a05f9SYonglong Liu 			hdev->num_nic_msix);
2679580a05f9SYonglong Liu 		return -EINVAL;
268007acf909SJian Shen 	}
268107acf909SJian Shen 
268207acf909SJian Shen 	return 0;
268307acf909SJian Shen }
268407acf909SJian Shen 
2685af2aedc5SGuangbin Huang static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
2686af2aedc5SGuangbin Huang {
2687af2aedc5SGuangbin Huang #define HCLGEVF_MAX_NON_TSO_BD_NUM			8U
2688af2aedc5SGuangbin Huang 
2689af2aedc5SGuangbin Huang 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2690af2aedc5SGuangbin Huang 
2691af2aedc5SGuangbin Huang 	ae_dev->dev_specs.max_non_tso_bd_num =
2692af2aedc5SGuangbin Huang 					HCLGEVF_MAX_NON_TSO_BD_NUM;
2693af2aedc5SGuangbin Huang 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
26947428d6c9SJie Wang 	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2695ab16b49cSHuazhong Tan 	ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2696e070c8b9SYufeng Mo 	ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
2697af2aedc5SGuangbin Huang }
2698af2aedc5SGuangbin Huang 
2699af2aedc5SGuangbin Huang static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
27006befad60SJie Wang 				    struct hclge_desc *desc)
2701af2aedc5SGuangbin Huang {
2702af2aedc5SGuangbin Huang 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2703af2aedc5SGuangbin Huang 	struct hclgevf_dev_specs_0_cmd *req0;
2704ab16b49cSHuazhong Tan 	struct hclgevf_dev_specs_1_cmd *req1;
2705af2aedc5SGuangbin Huang 
2706af2aedc5SGuangbin Huang 	req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
2707ab16b49cSHuazhong Tan 	req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
2708af2aedc5SGuangbin Huang 
2709af2aedc5SGuangbin Huang 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
2710af2aedc5SGuangbin Huang 	ae_dev->dev_specs.rss_ind_tbl_size =
2711af2aedc5SGuangbin Huang 					le16_to_cpu(req0->rss_ind_tbl_size);
271291bfae25SHuazhong Tan 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
2713af2aedc5SGuangbin Huang 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
2714ab16b49cSHuazhong Tan 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
2715e070c8b9SYufeng Mo 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
2716af2aedc5SGuangbin Huang }
2717af2aedc5SGuangbin Huang 
271813297028SGuangbin Huang static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
271913297028SGuangbin Huang {
272013297028SGuangbin Huang 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
272113297028SGuangbin Huang 
272213297028SGuangbin Huang 	if (!dev_specs->max_non_tso_bd_num)
272313297028SGuangbin Huang 		dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
272413297028SGuangbin Huang 	if (!dev_specs->rss_ind_tbl_size)
272513297028SGuangbin Huang 		dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
272613297028SGuangbin Huang 	if (!dev_specs->rss_key_size)
27277428d6c9SJie Wang 		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2728ab16b49cSHuazhong Tan 	if (!dev_specs->max_int_gl)
2729ab16b49cSHuazhong Tan 		dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2730e070c8b9SYufeng Mo 	if (!dev_specs->max_frm_size)
2731e070c8b9SYufeng Mo 		dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
273213297028SGuangbin Huang }
273313297028SGuangbin Huang 
2734af2aedc5SGuangbin Huang static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
2735af2aedc5SGuangbin Huang {
27366befad60SJie Wang 	struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
2737af2aedc5SGuangbin Huang 	int ret;
2738af2aedc5SGuangbin Huang 	int i;
2739af2aedc5SGuangbin Huang 
2740af2aedc5SGuangbin Huang 	/* set default specifications as devices lower than version V3 do not
2741af2aedc5SGuangbin Huang 	 * support querying specifications from firmware.
2742af2aedc5SGuangbin Huang 	 */
2743af2aedc5SGuangbin Huang 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
2744af2aedc5SGuangbin Huang 		hclgevf_set_default_dev_specs(hdev);
2745af2aedc5SGuangbin Huang 		return 0;
2746af2aedc5SGuangbin Huang 	}
2747af2aedc5SGuangbin Huang 
2748af2aedc5SGuangbin Huang 	for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
2749af2aedc5SGuangbin Huang 		hclgevf_cmd_setup_basic_desc(&desc[i],
275043710bfeSJie Wang 					     HCLGE_OPC_QUERY_DEV_SPECS, true);
2751cb413bfaSJie Wang 		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2752af2aedc5SGuangbin Huang 	}
275343710bfeSJie Wang 	hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
2754af2aedc5SGuangbin Huang 
2755af2aedc5SGuangbin Huang 	ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
2756af2aedc5SGuangbin Huang 	if (ret)
2757af2aedc5SGuangbin Huang 		return ret;
2758af2aedc5SGuangbin Huang 
2759af2aedc5SGuangbin Huang 	hclgevf_parse_dev_specs(hdev, desc);
276013297028SGuangbin Huang 	hclgevf_check_dev_specs(hdev);
2761af2aedc5SGuangbin Huang 
2762af2aedc5SGuangbin Huang 	return 0;
2763af2aedc5SGuangbin Huang }
2764af2aedc5SGuangbin Huang 
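/* For resets that tear down interrupt resources (VF full reset or FLR),
 * release the misc IRQ and MSI/MSI-X vectors first, then re-enable bus
 * mastering and set them up again before the reset continues.
 */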
2765862d969aSHuazhong Tan static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
2766862d969aSHuazhong Tan {
2767862d969aSHuazhong Tan 	struct pci_dev *pdev = hdev->pdev;
2768862d969aSHuazhong Tan 	int ret = 0;
2769862d969aSHuazhong Tan 
277009e6b30eSJie Wang 	if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
277109e6b30eSJie Wang 	     hdev->reset_type == HNAE3_FLR_RESET) &&
2772862d969aSHuazhong Tan 	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2773862d969aSHuazhong Tan 		hclgevf_misc_irq_uninit(hdev);
2774862d969aSHuazhong Tan 		hclgevf_uninit_msi(hdev);
2775862d969aSHuazhong Tan 		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2776862d969aSHuazhong Tan 	}
2777862d969aSHuazhong Tan 
2778862d969aSHuazhong Tan 	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2779862d969aSHuazhong Tan 		pci_set_master(pdev);
2780862d969aSHuazhong Tan 		ret = hclgevf_init_msi(hdev);
2781862d969aSHuazhong Tan 		if (ret) {
2782862d969aSHuazhong Tan 			dev_err(&pdev->dev,
2783862d969aSHuazhong Tan 				"failed(%d) to init MSI/MSI-X\n", ret);
2784862d969aSHuazhong Tan 			return ret;
2785862d969aSHuazhong Tan 		}
2786862d969aSHuazhong Tan 
2787862d969aSHuazhong Tan 		ret = hclgevf_misc_irq_init(hdev);
2788862d969aSHuazhong Tan 		if (ret) {
2789862d969aSHuazhong Tan 			hclgevf_uninit_msi(hdev);
2790862d969aSHuazhong Tan 			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
2791862d969aSHuazhong Tan 				ret);
2792862d969aSHuazhong Tan 			return ret;
2793862d969aSHuazhong Tan 		}
2794862d969aSHuazhong Tan 
2795862d969aSHuazhong Tan 		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2796862d969aSHuazhong Tan 	}
2797862d969aSHuazhong Tan 
2798862d969aSHuazhong Tan 	return ret;
2799862d969aSHuazhong Tan }
2800862d969aSHuazhong Tan 
2801039ba863SJian Shen static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
2802039ba863SJian Shen {
2803039ba863SJian Shen 	struct hclge_vf_to_pf_msg send_msg;
2804039ba863SJian Shen 
2805039ba863SJian Shen 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
2806039ba863SJian Shen 			       HCLGE_MBX_VPORT_LIST_CLEAR);
2807039ba863SJian Shen 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2808039ba863SJian Shen }
2809039ba863SJian Shen 
281079664077SHuazhong Tan static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
281179664077SHuazhong Tan {
281279664077SHuazhong Tan 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
281379664077SHuazhong Tan 		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
281479664077SHuazhong Tan }
281579664077SHuazhong Tan 
281679664077SHuazhong Tan static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
281779664077SHuazhong Tan {
281879664077SHuazhong Tan 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
281979664077SHuazhong Tan 		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
282079664077SHuazhong Tan }
282179664077SHuazhong Tan 
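/* Restore hardware state after a VF reset: reinitialize the command queue,
 * then reprogram RSS, GRO, VLAN and the port based VLAN state without
 * re-allocating the software resources set up at probe time.
 */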
28229c6f7085SHuazhong Tan static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
2823e2cb1decSSalil Mehta {
28247a01c897SSalil Mehta 	struct pci_dev *pdev = hdev->pdev;
2825e2cb1decSSalil Mehta 	int ret;
2826e2cb1decSSalil Mehta 
2827862d969aSHuazhong Tan 	ret = hclgevf_pci_reset(hdev);
2828862d969aSHuazhong Tan 	if (ret) {
2829862d969aSHuazhong Tan 		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
2830862d969aSHuazhong Tan 		return ret;
2831862d969aSHuazhong Tan 	}
2832862d969aSHuazhong Tan 
2833cb413bfaSJie Wang 	hclgevf_arq_init(hdev);
2834cb413bfaSJie Wang 	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
2835cb413bfaSJie Wang 				  &hdev->fw_version, false,
2836cb413bfaSJie Wang 				  hdev->reset_pending);
28379c6f7085SHuazhong Tan 	if (ret) {
28389c6f7085SHuazhong Tan 		dev_err(&pdev->dev, "cmd failed %d\n", ret);
28399c6f7085SHuazhong Tan 		return ret;
28407a01c897SSalil Mehta 	}
2841e2cb1decSSalil Mehta 
28429c6f7085SHuazhong Tan 	ret = hclgevf_rss_init_hw(hdev);
28439c6f7085SHuazhong Tan 	if (ret) {
28449c6f7085SHuazhong Tan 		dev_err(&hdev->pdev->dev,
28459c6f7085SHuazhong Tan 			"failed(%d) to initialize RSS\n", ret);
28469c6f7085SHuazhong Tan 		return ret;
28479c6f7085SHuazhong Tan 	}
28489c6f7085SHuazhong Tan 
28493462207dSYufeng Mo 	ret = hclgevf_config_gro(hdev);
2850b26a6feaSPeng Li 	if (ret)
2851b26a6feaSPeng Li 		return ret;
2852b26a6feaSPeng Li 
28539c6f7085SHuazhong Tan 	ret = hclgevf_init_vlan_config(hdev);
28549c6f7085SHuazhong Tan 	if (ret) {
28559c6f7085SHuazhong Tan 		dev_err(&hdev->pdev->dev,
28569c6f7085SHuazhong Tan 			"failed(%d) to initialize VLAN config\n", ret);
28579c6f7085SHuazhong Tan 		return ret;
28589c6f7085SHuazhong Tan 	}
28599c6f7085SHuazhong Tan 
2860190cd8a7SJian Shen 	/* get current port based vlan state from PF */
2861190cd8a7SJian Shen 	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
2862190cd8a7SJian Shen 	if (ret)
2863190cd8a7SJian Shen 		return ret;
2864190cd8a7SJian Shen 
2865c631c696SJian Shen 	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
2866c631c696SJian Shen 
286779664077SHuazhong Tan 	hclgevf_init_rxd_adv_layout(hdev);
286879664077SHuazhong Tan 
28699c6f7085SHuazhong Tan 	dev_info(&hdev->pdev->dev, "Reset done\n");
28709c6f7085SHuazhong Tan 
28719c6f7085SHuazhong Tan 	return 0;
28729c6f7085SHuazhong Tan }
28739c6f7085SHuazhong Tan 
28749c6f7085SHuazhong Tan static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
28759c6f7085SHuazhong Tan {
28769c6f7085SHuazhong Tan 	struct pci_dev *pdev = hdev->pdev;
28779c6f7085SHuazhong Tan 	int ret;
28789c6f7085SHuazhong Tan 
2879e2cb1decSSalil Mehta 	ret = hclgevf_pci_init(hdev);
288060df7e91SHuazhong Tan 	if (ret)
2881e2cb1decSSalil Mehta 		return ret;
2882e2cb1decSSalil Mehta 
2883cd624299SYufeng Mo 	ret = hclgevf_devlink_init(hdev);
2884cd624299SYufeng Mo 	if (ret)
2885cd624299SYufeng Mo 		goto err_devlink_init;
2886cd624299SYufeng Mo 
2887cb413bfaSJie Wang 	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
288860df7e91SHuazhong Tan 	if (ret)
28898b0195a3SHuazhong Tan 		goto err_cmd_queue_init;
28908b0195a3SHuazhong Tan 
2891cb413bfaSJie Wang 	hclgevf_arq_init(hdev);
2892cb413bfaSJie Wang 	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
2893cb413bfaSJie Wang 				  &hdev->fw_version, false,
2894cb413bfaSJie Wang 				  hdev->reset_pending);
2895eddf0462SYunsheng Lin 	if (ret)
2896eddf0462SYunsheng Lin 		goto err_cmd_init;
2897eddf0462SYunsheng Lin 
289807acf909SJian Shen 	/* Get vf resource */
289907acf909SJian Shen 	ret = hclgevf_query_vf_resource(hdev);
290060df7e91SHuazhong Tan 	if (ret)
29018b0195a3SHuazhong Tan 		goto err_cmd_init;
290207acf909SJian Shen 
2903af2aedc5SGuangbin Huang 	ret = hclgevf_query_dev_specs(hdev);
2904af2aedc5SGuangbin Huang 	if (ret) {
2905af2aedc5SGuangbin Huang 		dev_err(&pdev->dev,
2906af2aedc5SGuangbin Huang 			"failed to query dev specifications, ret = %d\n", ret);
2907af2aedc5SGuangbin Huang 		goto err_cmd_init;
2908af2aedc5SGuangbin Huang 	}
2909af2aedc5SGuangbin Huang 
291007acf909SJian Shen 	ret = hclgevf_init_msi(hdev);
291107acf909SJian Shen 	if (ret) {
291207acf909SJian Shen 		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
29138b0195a3SHuazhong Tan 		goto err_cmd_init;
291407acf909SJian Shen 	}
291507acf909SJian Shen 
291607acf909SJian Shen 	hclgevf_state_init(hdev);
2917dea846e8SHuazhong Tan 	hdev->reset_level = HNAE3_VF_FUNC_RESET;
2918afb6afdbSHuazhong Tan 	hdev->reset_type = HNAE3_NONE_RESET;
291907acf909SJian Shen 
2920e2cb1decSSalil Mehta 	ret = hclgevf_misc_irq_init(hdev);
292160df7e91SHuazhong Tan 	if (ret)
2922e2cb1decSSalil Mehta 		goto err_misc_irq_init;
2923e2cb1decSSalil Mehta 
2924862d969aSHuazhong Tan 	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2925862d969aSHuazhong Tan 
2926e2cb1decSSalil Mehta 	ret = hclgevf_configure(hdev);
2927e2cb1decSSalil Mehta 	if (ret) {
2928e2cb1decSSalil Mehta 		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
2929e2cb1decSSalil Mehta 		goto err_config;
2930e2cb1decSSalil Mehta 	}
2931e2cb1decSSalil Mehta 
2932e2cb1decSSalil Mehta 	ret = hclgevf_alloc_tqps(hdev);
2933e2cb1decSSalil Mehta 	if (ret) {
2934e2cb1decSSalil Mehta 		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
2935e2cb1decSSalil Mehta 		goto err_config;
2936e2cb1decSSalil Mehta 	}
2937e2cb1decSSalil Mehta 
2938e2cb1decSSalil Mehta 	ret = hclgevf_set_handle_info(hdev);
293960df7e91SHuazhong Tan 	if (ret)
2940e2cb1decSSalil Mehta 		goto err_config;
2941e2cb1decSSalil Mehta 
29423462207dSYufeng Mo 	ret = hclgevf_config_gro(hdev);
2943b26a6feaSPeng Li 	if (ret)
2944b26a6feaSPeng Li 		goto err_config;
2945b26a6feaSPeng Li 
2946e2cb1decSSalil Mehta 	/* Initialize RSS for this VF */
294793969dc1SJie Wang 	ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
294893969dc1SJie Wang 				      &hdev->rss_cfg);
294987ce161eSGuangbin Huang 	if (ret) {
295087ce161eSGuangbin Huang 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
295187ce161eSGuangbin Huang 		goto err_config;
295287ce161eSGuangbin Huang 	}
295387ce161eSGuangbin Huang 
2954e2cb1decSSalil Mehta 	ret = hclgevf_rss_init_hw(hdev);
2955e2cb1decSSalil Mehta 	if (ret) {
2956e2cb1decSSalil Mehta 		dev_err(&hdev->pdev->dev,
2957e2cb1decSSalil Mehta 			"failed(%d) to initialize RSS\n", ret);
2958e2cb1decSSalil Mehta 		goto err_config;
2959e2cb1decSSalil Mehta 	}
2960e2cb1decSSalil Mehta 
2961039ba863SJian Shen 	/* ensure the vf tbl list is empty before init */
2962039ba863SJian Shen 	ret = hclgevf_clear_vport_list(hdev);
2963039ba863SJian Shen 	if (ret) {
2964039ba863SJian Shen 		dev_err(&pdev->dev,
2965039ba863SJian Shen 			"failed to clear tbl list configuration, ret = %d.\n",
2966039ba863SJian Shen 			ret);
2967039ba863SJian Shen 		goto err_config;
2968039ba863SJian Shen 	}
2969039ba863SJian Shen 
2970e2cb1decSSalil Mehta 	ret = hclgevf_init_vlan_config(hdev);
2971e2cb1decSSalil Mehta 	if (ret) {
2972e2cb1decSSalil Mehta 		dev_err(&hdev->pdev->dev,
2973e2cb1decSSalil Mehta 			"failed(%d) to initialize VLAN config\n", ret);
2974e2cb1decSSalil Mehta 		goto err_config;
2975e2cb1decSSalil Mehta 	}
2976e2cb1decSSalil Mehta 
297779664077SHuazhong Tan 	hclgevf_init_rxd_adv_layout(hdev);
297879664077SHuazhong Tan 
29790251d196SGuangbin Huang 	set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
29800251d196SGuangbin Huang 
29810742ed7cSHuazhong Tan 	hdev->last_reset_time = jiffies;
298208d80a4cSHuazhong Tan 	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
298308d80a4cSHuazhong Tan 		 HCLGEVF_DRIVER_NAME);
2984e2cb1decSSalil Mehta 
2985ff200099SYunsheng Lin 	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
2986ff200099SYunsheng Lin 
2987e2cb1decSSalil Mehta 	return 0;
2988e2cb1decSSalil Mehta 
2989e2cb1decSSalil Mehta err_config:
2990e2cb1decSSalil Mehta 	hclgevf_misc_irq_uninit(hdev);
2991e2cb1decSSalil Mehta err_misc_irq_init:
2992e2cb1decSSalil Mehta 	hclgevf_state_uninit(hdev);
2993e2cb1decSSalil Mehta 	hclgevf_uninit_msi(hdev);
299407acf909SJian Shen err_cmd_init:
29959970308fSJie Wang 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
29968b0195a3SHuazhong Tan err_cmd_queue_init:
2997cd624299SYufeng Mo 	hclgevf_devlink_uninit(hdev);
2998cd624299SYufeng Mo err_devlink_init:
2999e2cb1decSSalil Mehta 	hclgevf_pci_uninit(hdev);
3000862d969aSHuazhong Tan 	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3001e2cb1decSSalil Mehta 	return ret;
3002e2cb1decSSalil Mehta }
3003e2cb1decSSalil Mehta 
30047a01c897SSalil Mehta static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
3005e2cb1decSSalil Mehta {
3006d3410018SYufeng Mo 	struct hclge_vf_to_pf_msg send_msg;
3007d3410018SYufeng Mo 
3008e2cb1decSSalil Mehta 	hclgevf_state_uninit(hdev);
300979664077SHuazhong Tan 	hclgevf_uninit_rxd_adv_layout(hdev);
3010862d969aSHuazhong Tan 
3011d3410018SYufeng Mo 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
3012d3410018SYufeng Mo 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
301323b4201dSJian Shen 
3014862d969aSHuazhong Tan 	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3015eddf0462SYunsheng Lin 		hclgevf_misc_irq_uninit(hdev);
3016e2cb1decSSalil Mehta 		hclgevf_uninit_msi(hdev);
30177a01c897SSalil Mehta 	}
30187a01c897SSalil Mehta 
30199970308fSJie Wang 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
3020cd624299SYufeng Mo 	hclgevf_devlink_uninit(hdev);
3021e3364c5fSZenghui Yu 	hclgevf_pci_uninit(hdev);
3022ee4bcd3bSJian Shen 	hclgevf_uninit_mac_list(hdev);
3023862d969aSHuazhong Tan }
3024862d969aSHuazhong Tan 
30257a01c897SSalil Mehta static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
30267a01c897SSalil Mehta {
30277a01c897SSalil Mehta 	struct pci_dev *pdev = ae_dev->pdev;
30287a01c897SSalil Mehta 	int ret;
30297a01c897SSalil Mehta 
30307a01c897SSalil Mehta 	ret = hclgevf_alloc_hdev(ae_dev);
30317a01c897SSalil Mehta 	if (ret) {
30327a01c897SSalil Mehta 		dev_err(&pdev->dev, "hclge device allocation failed\n");
30337a01c897SSalil Mehta 		return ret;
30347a01c897SSalil Mehta 	}
30357a01c897SSalil Mehta 
30367a01c897SSalil Mehta 	ret = hclgevf_init_hdev(ae_dev->priv);
3037a6d818e3SYunsheng Lin 	if (ret) {
30387a01c897SSalil Mehta 		dev_err(&pdev->dev, "hclge device initialization failed\n");
30397a01c897SSalil Mehta 		return ret;
30407a01c897SSalil Mehta 	}
30417a01c897SSalil Mehta 
3042a6d818e3SYunsheng Lin 	return 0;
3043a6d818e3SYunsheng Lin }
3044a6d818e3SYunsheng Lin 
30457a01c897SSalil Mehta static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
30467a01c897SSalil Mehta {
30477a01c897SSalil Mehta 	struct hclgevf_dev *hdev = ae_dev->priv;
30487a01c897SSalil Mehta 
30497a01c897SSalil Mehta 	hclgevf_uninit_hdev(hdev);
3050e2cb1decSSalil Mehta 	ae_dev->priv = NULL;
3051e2cb1decSSalil Mehta }
3052e2cb1decSSalil Mehta 
3053849e4607SPeng Li static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
3054849e4607SPeng Li {
3055849e4607SPeng Li 	struct hnae3_handle *nic = &hdev->nic;
3056849e4607SPeng Li 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
3057849e4607SPeng Li 
30588be73621SHuazhong Tan 	return min_t(u32, hdev->rss_size_max,
305935244430SJian Shen 		     hdev->num_tqps / kinfo->tc_info.num_tc);
3060849e4607SPeng Li }
3061849e4607SPeng Li 
3062849e4607SPeng Li /**
3063849e4607SPeng Li  * hclgevf_get_channels - Get the current channels enabled and max supported.
3064849e4607SPeng Li  * @handle: hardware information for network interface
3065849e4607SPeng Li  * @ch: ethtool channels structure
3066849e4607SPeng Li  *
3067849e4607SPeng Li  * We don't support separate tx and rx queues as channels. The other count
3068849e4607SPeng Li  * represents how many queues are being used for control. max_combined counts
3069849e4607SPeng Li  * how many queue pairs we can support. They may not be mapped 1 to 1 with
3070849e4607SPeng Li  * q_vectors since we support a lot more queue pairs than q_vectors.
3071849e4607SPeng Li  **/
3072849e4607SPeng Li static void hclgevf_get_channels(struct hnae3_handle *handle,
3073849e4607SPeng Li 				 struct ethtool_channels *ch)
3074849e4607SPeng Li {
3075849e4607SPeng Li 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3076849e4607SPeng Li 
3077849e4607SPeng Li 	ch->max_combined = hclgevf_get_max_channels(hdev);
3078849e4607SPeng Li 	ch->other_count = 0;
3079849e4607SPeng Li 	ch->max_other = 0;
30808be73621SHuazhong Tan 	ch->combined_count = handle->kinfo.rss_size;
3081849e4607SPeng Li }
3082849e4607SPeng Li 
3083cc719218SPeng Li static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
30840d43bf45SHuazhong Tan 					  u16 *alloc_tqps, u16 *max_rss_size)
3085cc719218SPeng Li {
3086cc719218SPeng Li 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3087cc719218SPeng Li 
30880d43bf45SHuazhong Tan 	*alloc_tqps = hdev->num_tqps;
3089cc719218SPeng Li 	*max_rss_size = hdev->rss_size_max;
3090cc719218SPeng Li }
3091cc719218SPeng Li 
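/* Recompute kinfo->rss_size and kinfo->num_tqps for a channel count change,
 * bounded by the device rss_size_max and the number of TQPs available per TC.
 */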
30924093d1a2SGuangbin Huang static void hclgevf_update_rss_size(struct hnae3_handle *handle,
30934093d1a2SGuangbin Huang 				    u32 new_tqps_num)
30944093d1a2SGuangbin Huang {
30954093d1a2SGuangbin Huang 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
30964093d1a2SGuangbin Huang 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
30974093d1a2SGuangbin Huang 	u16 max_rss_size;
30984093d1a2SGuangbin Huang 
30994093d1a2SGuangbin Huang 	kinfo->req_rss_size = new_tqps_num;
31004093d1a2SGuangbin Huang 
31014093d1a2SGuangbin Huang 	max_rss_size = min_t(u16, hdev->rss_size_max,
310235244430SJian Shen 			     hdev->num_tqps / kinfo->tc_info.num_tc);
31034093d1a2SGuangbin Huang 
31044093d1a2SGuangbin Huang 	/* Use the user's configuration when it is not larger than
31054093d1a2SGuangbin Huang 	 * max_rss_size; otherwise, use the maximum specification value.
31064093d1a2SGuangbin Huang 	 */
31074093d1a2SGuangbin Huang 	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
31084093d1a2SGuangbin Huang 	    kinfo->req_rss_size <= max_rss_size)
31094093d1a2SGuangbin Huang 		kinfo->rss_size = kinfo->req_rss_size;
31104093d1a2SGuangbin Huang 	else if (kinfo->rss_size > max_rss_size ||
31114093d1a2SGuangbin Huang 		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
31124093d1a2SGuangbin Huang 		kinfo->rss_size = max_rss_size;
31134093d1a2SGuangbin Huang 
311435244430SJian Shen 	kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
31154093d1a2SGuangbin Huang }
31164093d1a2SGuangbin Huang 
31174093d1a2SGuangbin Huang static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
31184093d1a2SGuangbin Huang 				bool rxfh_configured)
31194093d1a2SGuangbin Huang {
31204093d1a2SGuangbin Huang 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
31214093d1a2SGuangbin Huang 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
312293969dc1SJie Wang 	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
312393969dc1SJie Wang 	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
312493969dc1SJie Wang 	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
31254093d1a2SGuangbin Huang 	u16 cur_rss_size = kinfo->rss_size;
31264093d1a2SGuangbin Huang 	u16 cur_tqps = kinfo->num_tqps;
31274093d1a2SGuangbin Huang 	u32 *rss_indir;
31284093d1a2SGuangbin Huang 	unsigned int i;
31294093d1a2SGuangbin Huang 	int ret;
31304093d1a2SGuangbin Huang 
31314093d1a2SGuangbin Huang 	hclgevf_update_rss_size(handle, new_tqps_num);
31324093d1a2SGuangbin Huang 
3133*ae9f29fdSJie Wang 	hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
313493969dc1SJie Wang 				   tc_offset, tc_valid, tc_size);
313593969dc1SJie Wang 	ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
313693969dc1SJie Wang 					 tc_valid, tc_size);
31374093d1a2SGuangbin Huang 	if (ret)
31384093d1a2SGuangbin Huang 		return ret;
31394093d1a2SGuangbin Huang 
3140cd7e963dSSalil Mehta 	/* RSS indirection table has been configured by user */
31414093d1a2SGuangbin Huang 	if (rxfh_configured)
31424093d1a2SGuangbin Huang 		goto out;
31434093d1a2SGuangbin Huang 
31444093d1a2SGuangbin Huang 	/* Reinitialize the rss indirection table according to the new RSS size */
314587ce161eSGuangbin Huang 	rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
314687ce161eSGuangbin Huang 			    sizeof(u32), GFP_KERNEL);
31474093d1a2SGuangbin Huang 	if (!rss_indir)
31484093d1a2SGuangbin Huang 		return -ENOMEM;
31494093d1a2SGuangbin Huang 
315087ce161eSGuangbin Huang 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
31514093d1a2SGuangbin Huang 		rss_indir[i] = i % kinfo->rss_size;
31524093d1a2SGuangbin Huang 
3153944de484SGuojia Liao 	hdev->rss_cfg.rss_size = kinfo->rss_size;
3154944de484SGuojia Liao 
31554093d1a2SGuangbin Huang 	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
31564093d1a2SGuangbin Huang 	if (ret)
31574093d1a2SGuangbin Huang 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
31584093d1a2SGuangbin Huang 			ret);
31594093d1a2SGuangbin Huang 
31604093d1a2SGuangbin Huang 	kfree(rss_indir);
31614093d1a2SGuangbin Huang 
31624093d1a2SGuangbin Huang out:
31634093d1a2SGuangbin Huang 	if (!ret)
31644093d1a2SGuangbin Huang 		dev_info(&hdev->pdev->dev,
31654093d1a2SGuangbin Huang 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
31664093d1a2SGuangbin Huang 			 cur_rss_size, kinfo->rss_size,
316735244430SJian Shen 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
31684093d1a2SGuangbin Huang 
31694093d1a2SGuangbin Huang 	return ret;
31704093d1a2SGuangbin Huang }
31714093d1a2SGuangbin Huang 
3172175ec96bSFuyun Liang static int hclgevf_get_status(struct hnae3_handle *handle)
3173175ec96bSFuyun Liang {
3174175ec96bSFuyun Liang 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3175175ec96bSFuyun Liang 
3176175ec96bSFuyun Liang 	return hdev->hw.mac.link;
3177175ec96bSFuyun Liang }
3178175ec96bSFuyun Liang 
31794a152de9SFuyun Liang static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
31804a152de9SFuyun Liang 					    u8 *auto_neg, u32 *speed,
31810f032f93SHao Chen 					    u8 *duplex, u32 *lane_num)
31824a152de9SFuyun Liang {
31834a152de9SFuyun Liang 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
31844a152de9SFuyun Liang 
31854a152de9SFuyun Liang 	if (speed)
31864a152de9SFuyun Liang 		*speed = hdev->hw.mac.speed;
31874a152de9SFuyun Liang 	if (duplex)
31884a152de9SFuyun Liang 		*duplex = hdev->hw.mac.duplex;
31894a152de9SFuyun Liang 	if (auto_neg)
31904a152de9SFuyun Liang 		*auto_neg = AUTONEG_DISABLE;
31914a152de9SFuyun Liang }
31924a152de9SFuyun Liang 
31934a152de9SFuyun Liang void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
31944a152de9SFuyun Liang 				 u8 duplex)
31954a152de9SFuyun Liang {
31964a152de9SFuyun Liang 	hdev->hw.mac.speed = speed;
31974a152de9SFuyun Liang 	hdev->hw.mac.duplex = duplex;
31984a152de9SFuyun Liang }
31994a152de9SFuyun Liang 
32001731be4cSYonglong Liu static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
32015c9f6b39SPeng Li {
32025c9f6b39SPeng Li 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
32033462207dSYufeng Mo 	bool gro_en_old = hdev->gro_en;
32043462207dSYufeng Mo 	int ret;
32055c9f6b39SPeng Li 
32063462207dSYufeng Mo 	hdev->gro_en = enable;
32073462207dSYufeng Mo 	ret = hclgevf_config_gro(hdev);
32083462207dSYufeng Mo 	if (ret)
32093462207dSYufeng Mo 		hdev->gro_en = gro_en_old;
32103462207dSYufeng Mo 
32113462207dSYufeng Mo 	return ret;
32125c9f6b39SPeng Li }
32135c9f6b39SPeng Li 
321488d10bd6SJian Shen static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
321588d10bd6SJian Shen 				   u8 *module_type)
3216c136b884SPeng Li {
3217c136b884SPeng Li 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
321888d10bd6SJian Shen 
3219c136b884SPeng Li 	if (media_type)
3220c136b884SPeng Li 		*media_type = hdev->hw.mac.media_type;
322188d10bd6SJian Shen 
322288d10bd6SJian Shen 	if (module_type)
322388d10bd6SJian Shen 		*module_type = hdev->hw.mac.module_type;
3224c136b884SPeng Li }
3225c136b884SPeng Li 
32264d60291bSHuazhong Tan static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
32274d60291bSHuazhong Tan {
32284d60291bSHuazhong Tan 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
32294d60291bSHuazhong Tan 
3230aa5c4f17SHuazhong Tan 	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
32314d60291bSHuazhong Tan }
32324d60291bSHuazhong Tan 
3233fe735c84SHuazhong Tan static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
3234fe735c84SHuazhong Tan {
3235fe735c84SHuazhong Tan 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3236fe735c84SHuazhong Tan 
3237076bb537SJie Wang 	return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3238fe735c84SHuazhong Tan }
3239fe735c84SHuazhong Tan 
32404d60291bSHuazhong Tan static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
32414d60291bSHuazhong Tan {
32424d60291bSHuazhong Tan 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
32434d60291bSHuazhong Tan 
32444d60291bSHuazhong Tan 	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
32454d60291bSHuazhong Tan }
32464d60291bSHuazhong Tan 
32474d60291bSHuazhong Tan static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
32484d60291bSHuazhong Tan {
32494d60291bSHuazhong Tan 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
32504d60291bSHuazhong Tan 
3251c88a6e7dSHuazhong Tan 	return hdev->rst_stats.hw_rst_done_cnt;
32524d60291bSHuazhong Tan }
32534d60291bSHuazhong Tan 
32549194d18bSliuzhongzhu static void hclgevf_get_link_mode(struct hnae3_handle *handle,
32559194d18bSliuzhongzhu 				  unsigned long *supported,
32569194d18bSliuzhongzhu 				  unsigned long *advertising)
32579194d18bSliuzhongzhu {
32589194d18bSliuzhongzhu 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
32599194d18bSliuzhongzhu 
32609194d18bSliuzhongzhu 	*supported = hdev->hw.mac.supported;
32619194d18bSliuzhongzhu 	*advertising = hdev->hw.mac.advertising;
32629194d18bSliuzhongzhu }
32639194d18bSliuzhongzhu 
32641600c3e5SJian Shen #define MAX_SEPARATE_NUM	4
3265e407efddSHuazhong Tan #define SEPARATOR_VALUE		0xFDFCFBFA
32661600c3e5SJian Shen #define REG_NUM_PER_LINE	4
32671600c3e5SJian Shen #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
32681600c3e5SJian Shen 
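/* The register dump groups cmdq, common, per-ring and per-vector TQP
 * interrupt registers; each group is padded with SEPARATOR_VALUE words,
 * ring registers are repeated per TQP and interrupt registers per vector.
 */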
32691600c3e5SJian Shen static int hclgevf_get_regs_len(struct hnae3_handle *handle)
32701600c3e5SJian Shen {
32711600c3e5SJian Shen 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
32721600c3e5SJian Shen 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
32731600c3e5SJian Shen 
32741600c3e5SJian Shen 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
32751600c3e5SJian Shen 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
32761600c3e5SJian Shen 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
32771600c3e5SJian Shen 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
32781600c3e5SJian Shen 
32791600c3e5SJian Shen 	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
32801600c3e5SJian Shen 		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
32811600c3e5SJian Shen }
32821600c3e5SJian Shen 
32831600c3e5SJian Shen static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
32841600c3e5SJian Shen 			     void *data)
32851600c3e5SJian Shen {
32861600c3e5SJian Shen 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
32871600c3e5SJian Shen 	int i, j, reg_um, separator_num;
32881600c3e5SJian Shen 	u32 *reg = data;
32891600c3e5SJian Shen 
32901600c3e5SJian Shen 	*version = hdev->fw_version;
32911600c3e5SJian Shen 
32921600c3e5SJian Shen 	/* fetch per-VF register values from the VF PCIe register space */
32931600c3e5SJian Shen 	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
32941600c3e5SJian Shen 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
32951600c3e5SJian Shen 	for (i = 0; i < reg_um; i++)
32961600c3e5SJian Shen 		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
32971600c3e5SJian Shen 	for (i = 0; i < separator_num; i++)
32981600c3e5SJian Shen 		*reg++ = SEPARATOR_VALUE;
32991600c3e5SJian Shen 
33001600c3e5SJian Shen 	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
33011600c3e5SJian Shen 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
33021600c3e5SJian Shen 	for (i = 0; i < reg_um; i++)
33031600c3e5SJian Shen 		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
33041600c3e5SJian Shen 	for (i = 0; i < separator_num; i++)
33051600c3e5SJian Shen 		*reg++ = SEPARATOR_VALUE;
33061600c3e5SJian Shen 
33071600c3e5SJian Shen 	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
33081600c3e5SJian Shen 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
33091600c3e5SJian Shen 	for (j = 0; j < hdev->num_tqps; j++) {
33101600c3e5SJian Shen 		for (i = 0; i < reg_um; i++)
33111600c3e5SJian Shen 			*reg++ = hclgevf_read_dev(&hdev->hw,
33121600c3e5SJian Shen 						  ring_reg_addr_list[i] +
3313350cb440SPeng Li 						  HCLGEVF_TQP_REG_SIZE * j);
33141600c3e5SJian Shen 		for (i = 0; i < separator_num; i++)
33151600c3e5SJian Shen 			*reg++ = SEPARATOR_VALUE;
33161600c3e5SJian Shen 	}
33171600c3e5SJian Shen 
33181600c3e5SJian Shen 	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
33191600c3e5SJian Shen 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
33201600c3e5SJian Shen 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
33211600c3e5SJian Shen 		for (i = 0; i < reg_um; i++)
33221600c3e5SJian Shen 			*reg++ = hclgevf_read_dev(&hdev->hw,
33231600c3e5SJian Shen 						  tqp_intr_reg_addr_list[i] +
33241600c3e5SJian Shen 						  4 * j);
33251600c3e5SJian Shen 		for (i = 0; i < separator_num; i++)
33261600c3e5SJian Shen 			*reg++ = SEPARATOR_VALUE;
33271600c3e5SJian Shen 	}
33281600c3e5SJian Shen }
33291600c3e5SJian Shen 
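/* Apply a port based VLAN change on this VF: with rtnl_lock held, pause
 * the client, forward the new configuration to the PF via the mailbox,
 * record the resulting VLAN state on success and bring the client back
 * up. Bails out early if a reset is in progress or has failed.
 */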
333092f11ea1SJian Shen void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
3331767975e5SJie Wang 				struct hclge_mbx_port_base_vlan *port_base_vlan)
333292f11ea1SJian Shen {
333392f11ea1SJian Shen 	struct hnae3_handle *nic = &hdev->nic;
3334d3410018SYufeng Mo 	struct hclge_vf_to_pf_msg send_msg;
3335a6f7bfdcSJian Shen 	int ret;
333692f11ea1SJian Shen 
333792f11ea1SJian Shen 	rtnl_lock();
3338a6f7bfdcSJian Shen 
3339b7b5d25bSGuojia Liao 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
3340b7b5d25bSGuojia Liao 	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
3341a6f7bfdcSJian Shen 		dev_warn(&hdev->pdev->dev,
3342a6f7bfdcSJian Shen 			 "is resetting when updating port based vlan info\n");
334392f11ea1SJian Shen 		rtnl_unlock();
3344a6f7bfdcSJian Shen 		return;
3345a6f7bfdcSJian Shen 	}
3346a6f7bfdcSJian Shen 
3347a6f7bfdcSJian Shen 	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
3348a6f7bfdcSJian Shen 	if (ret) {
3349a6f7bfdcSJian Shen 		rtnl_unlock();
3350a6f7bfdcSJian Shen 		return;
3351a6f7bfdcSJian Shen 	}
335292f11ea1SJian Shen 
335392f11ea1SJian Shen 	/* send msg to PF and wait for it to update the port based VLAN info */
3354d3410018SYufeng Mo 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
3355d3410018SYufeng Mo 			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
3356767975e5SJie Wang 	memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
3357a6f7bfdcSJian Shen 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3358a6f7bfdcSJian Shen 	if (!ret) {
335992f11ea1SJian Shen 		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
3360a6f7bfdcSJian Shen 			nic->port_base_vlan_state = state;
336192f11ea1SJian Shen 		else
336292f11ea1SJian Shen 			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
3363a6f7bfdcSJian Shen 	}
336492f11ea1SJian Shen 
336592f11ea1SJian Shen 	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
336692f11ea1SJian Shen 	rtnl_unlock();
336792f11ea1SJian Shen }
336892f11ea1SJian Shen 
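/* Operations the VF (hclgevf) driver exposes to the hnae3 framework,
 * registered via ae_algovf below.
 */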
3369e2cb1decSSalil Mehta static const struct hnae3_ae_ops hclgevf_ops = {
3370e2cb1decSSalil Mehta 	.init_ae_dev = hclgevf_init_ae_dev,
3371e2cb1decSSalil Mehta 	.uninit_ae_dev = hclgevf_uninit_ae_dev,
3372bb1890d5SJiaran Zhang 	.reset_prepare = hclgevf_reset_prepare_general,
3373bb1890d5SJiaran Zhang 	.reset_done = hclgevf_reset_done,
3374e718a93fSPeng Li 	.init_client_instance = hclgevf_init_client_instance,
3375e718a93fSPeng Li 	.uninit_client_instance = hclgevf_uninit_client_instance,
3376e2cb1decSSalil Mehta 	.start = hclgevf_ae_start,
3377e2cb1decSSalil Mehta 	.stop = hclgevf_ae_stop,
3378a6d818e3SYunsheng Lin 	.client_start = hclgevf_client_start,
3379a6d818e3SYunsheng Lin 	.client_stop = hclgevf_client_stop,
3380e2cb1decSSalil Mehta 	.map_ring_to_vector = hclgevf_map_ring_to_vector,
3381e2cb1decSSalil Mehta 	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
3382e2cb1decSSalil Mehta 	.get_vector = hclgevf_get_vector,
33830d3e6631SYunsheng Lin 	.put_vector = hclgevf_put_vector,
3384e2cb1decSSalil Mehta 	.reset_queue = hclgevf_reset_tqp,
3385e2cb1decSSalil Mehta 	.get_mac_addr = hclgevf_get_mac_addr,
3386e2cb1decSSalil Mehta 	.set_mac_addr = hclgevf_set_mac_addr,
3387e2cb1decSSalil Mehta 	.add_uc_addr = hclgevf_add_uc_addr,
3388e2cb1decSSalil Mehta 	.rm_uc_addr = hclgevf_rm_uc_addr,
3389e2cb1decSSalil Mehta 	.add_mc_addr = hclgevf_add_mc_addr,
3390e2cb1decSSalil Mehta 	.rm_mc_addr = hclgevf_rm_mc_addr,
3391e2cb1decSSalil Mehta 	.get_stats = hclgevf_get_stats,
3392e2cb1decSSalil Mehta 	.update_stats = hclgevf_update_stats,
3393e2cb1decSSalil Mehta 	.get_strings = hclgevf_get_strings,
3394e2cb1decSSalil Mehta 	.get_sset_count = hclgevf_get_sset_count,
3395027733b1SJie Wang 	.get_rss_key_size = hclge_comm_get_rss_key_size,
3396e2cb1decSSalil Mehta 	.get_rss = hclgevf_get_rss,
3397e2cb1decSSalil Mehta 	.set_rss = hclgevf_set_rss,
3398d97b3072SJian Shen 	.get_rss_tuple = hclgevf_get_rss_tuple,
3399d97b3072SJian Shen 	.set_rss_tuple = hclgevf_set_rss_tuple,
3400e2cb1decSSalil Mehta 	.get_tc_size = hclgevf_get_tc_size,
3401e2cb1decSSalil Mehta 	.get_fw_version = hclgevf_get_fw_version,
3402e2cb1decSSalil Mehta 	.set_vlan_filter = hclgevf_set_vlan_filter,
3403fa6a262aSJian Shen 	.enable_vlan_filter = hclgevf_enable_vlan_filter,
3404b2641e2aSYunsheng Lin 	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
34056d4c3981SSalil Mehta 	.reset_event = hclgevf_reset_event,
3406720bd583SHuazhong Tan 	.set_default_reset_request = hclgevf_set_def_reset_request,
34074093d1a2SGuangbin Huang 	.set_channels = hclgevf_set_channels,
3408849e4607SPeng Li 	.get_channels = hclgevf_get_channels,
3409cc719218SPeng Li 	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
34101600c3e5SJian Shen 	.get_regs_len = hclgevf_get_regs_len,
34111600c3e5SJian Shen 	.get_regs = hclgevf_get_regs,
3412175ec96bSFuyun Liang 	.get_status = hclgevf_get_status,
34134a152de9SFuyun Liang 	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
3414c136b884SPeng Li 	.get_media_type = hclgevf_get_media_type,
34154d60291bSHuazhong Tan 	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
34164d60291bSHuazhong Tan 	.ae_dev_resetting = hclgevf_ae_dev_resetting,
34174d60291bSHuazhong Tan 	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
34185c9f6b39SPeng Li 	.set_gro_en = hclgevf_gro_en,
3419818f1675SYunsheng Lin 	.set_mtu = hclgevf_set_mtu,
34200c29d191Sliuzhongzhu 	.get_global_queue_id = hclgevf_get_qid_global,
34218cdb992fSJian Shen 	.set_timer_task = hclgevf_set_timer_task,
34229194d18bSliuzhongzhu 	.get_link_mode = hclgevf_get_link_mode,
3423e196ec75SJian Shen 	.set_promisc_mode = hclgevf_set_promisc_mode,
3424c631c696SJian Shen 	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
3425fe735c84SHuazhong Tan 	.get_cmdq_stat = hclgevf_get_cmdq_stat,
3426e2cb1decSSalil Mehta };
3427e2cb1decSSalil Mehta 
3428e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf = {
3429e2cb1decSSalil Mehta 	.ops = &hclgevf_ops,
3430e2cb1decSSalil Mehta 	.pdev_id_table = ae_algovf_pci_tbl,
3431e2cb1decSSalil Mehta };
3432e2cb1decSSalil Mehta 
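/* Module entry points: allocate the driver workqueue and register the VF
 * algo with the hnae3 framework; hclgevf_exit() undoes both in reverse
 * order.
 */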
3433134a4647SXiu Jianfeng static int __init hclgevf_init(void)
3434e2cb1decSSalil Mehta {
3435e2cb1decSSalil Mehta 	pr_info("%s is initializing\n", HCLGEVF_NAME);
3436e2cb1decSSalil Mehta 
3437f29da408SYufeng Mo 	hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
34380ea68902SYunsheng Lin 	if (!hclgevf_wq) {
34390ea68902SYunsheng Lin 		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
34400ea68902SYunsheng Lin 		return -ENOMEM;
34410ea68902SYunsheng Lin 	}
34420ea68902SYunsheng Lin 
3443854cf33aSFuyun Liang 	hnae3_register_ae_algo(&ae_algovf);
3444854cf33aSFuyun Liang 
3445854cf33aSFuyun Liang 	return 0;
3446e2cb1decSSalil Mehta }
3447e2cb1decSSalil Mehta 
3448134a4647SXiu Jianfeng static void __exit hclgevf_exit(void)
3449e2cb1decSSalil Mehta {
3450e2cb1decSSalil Mehta 	hnae3_unregister_ae_algo(&ae_algovf);
34510ea68902SYunsheng Lin 	destroy_workqueue(hclgevf_wq);
3452e2cb1decSSalil Mehta }
3453e2cb1decSSalil Mehta module_init(hclgevf_init);
3454e2cb1decSSalil Mehta module_exit(hclgevf_exit);
3455e2cb1decSSalil Mehta 
3456e2cb1decSSalil Mehta MODULE_LICENSE("GPL");
3457e2cb1decSSalil Mehta MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3458e2cb1decSSalil Mehta MODULE_DESCRIPTION("HCLGEVF Driver");
3459e2cb1decSSalil Mehta MODULE_VERSION(HCLGEVF_MOD_VERSION);
3460