1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/etherdevice.h>
5 #include <linux/iopoll.h>
6 #include <net/rtnetlink.h>
7 #include "hclgevf_cmd.h"
8 #include "hclgevf_main.h"
9 #include "hclge_mbx.h"
10 #include "hnae3.h"
11 #include "hclgevf_devlink.h"
12 
13 #define HCLGEVF_NAME	"hclgevf"
14 
15 #define HCLGEVF_RESET_MAX_FAIL_CNT	5
16 
17 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
18 static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
19 				  unsigned long delay);
20 
21 static struct hnae3_ae_algo ae_algovf;
22 
23 static struct workqueue_struct *hclgevf_wq;
24 
25 static const struct pci_device_id ae_algovf_pci_tbl[] = {
26 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
27 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
28 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
29 	/* required last entry */
30 	{0, }
31 };
32 
33 static const u8 hclgevf_hash_key[] = {
34 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
35 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
36 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
37 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
38 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
39 };
40 
41 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
42 
43 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_NIC_CSQ_BASEADDR_L_REG,
44 					 HCLGEVF_NIC_CSQ_BASEADDR_H_REG,
45 					 HCLGEVF_NIC_CSQ_DEPTH_REG,
46 					 HCLGEVF_NIC_CSQ_TAIL_REG,
47 					 HCLGEVF_NIC_CSQ_HEAD_REG,
48 					 HCLGEVF_NIC_CRQ_BASEADDR_L_REG,
49 					 HCLGEVF_NIC_CRQ_BASEADDR_H_REG,
50 					 HCLGEVF_NIC_CRQ_DEPTH_REG,
51 					 HCLGEVF_NIC_CRQ_TAIL_REG,
52 					 HCLGEVF_NIC_CRQ_HEAD_REG,
53 					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
54 					 HCLGEVF_VECTOR0_CMDQ_STATE_REG,
55 					 HCLGEVF_CMDQ_INTR_EN_REG,
56 					 HCLGEVF_CMDQ_INTR_GEN_REG};
57 
58 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
59 					   HCLGEVF_RST_ING,
60 					   HCLGEVF_GRO_EN_REG};
61 
62 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
63 					 HCLGEVF_RING_RX_ADDR_H_REG,
64 					 HCLGEVF_RING_RX_BD_NUM_REG,
65 					 HCLGEVF_RING_RX_BD_LENGTH_REG,
66 					 HCLGEVF_RING_RX_MERGE_EN_REG,
67 					 HCLGEVF_RING_RX_TAIL_REG,
68 					 HCLGEVF_RING_RX_HEAD_REG,
69 					 HCLGEVF_RING_RX_FBD_NUM_REG,
70 					 HCLGEVF_RING_RX_OFFSET_REG,
71 					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
72 					 HCLGEVF_RING_RX_STASH_REG,
73 					 HCLGEVF_RING_RX_BD_ERR_REG,
74 					 HCLGEVF_RING_TX_ADDR_L_REG,
75 					 HCLGEVF_RING_TX_ADDR_H_REG,
76 					 HCLGEVF_RING_TX_BD_NUM_REG,
77 					 HCLGEVF_RING_TX_PRIORITY_REG,
78 					 HCLGEVF_RING_TX_TC_REG,
79 					 HCLGEVF_RING_TX_MERGE_EN_REG,
80 					 HCLGEVF_RING_TX_TAIL_REG,
81 					 HCLGEVF_RING_TX_HEAD_REG,
82 					 HCLGEVF_RING_TX_FBD_NUM_REG,
83 					 HCLGEVF_RING_TX_OFFSET_REG,
84 					 HCLGEVF_RING_TX_EBD_NUM_REG,
85 					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
86 					 HCLGEVF_RING_TX_BD_ERR_REG,
87 					 HCLGEVF_RING_EN_REG};
88 
89 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
90 					     HCLGEVF_TQP_INTR_GL0_REG,
91 					     HCLGEVF_TQP_INTR_GL1_REG,
92 					     HCLGEVF_TQP_INTR_GL2_REG,
93 					     HCLGEVF_TQP_INTR_RL_REG};
94 
95 static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
96 {
97 	if (!handle->client)
98 		return container_of(handle, struct hclgevf_dev, nic);
99 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
100 		return container_of(handle, struct hclgevf_dev, roce);
101 	else
102 		return container_of(handle, struct hclgevf_dev, nic);
103 }
104 
105 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
106 {
107 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
108 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
109 	struct hclge_desc desc;
110 	struct hclgevf_tqp *tqp;
111 	int status;
112 	int i;
113 
114 	for (i = 0; i < kinfo->num_tqps; i++) {
115 		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
116 		hclgevf_cmd_setup_basic_desc(&desc,
117 					     HCLGEVF_OPC_QUERY_RX_STATUS,
118 					     true);
119 
120 		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
121 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
122 		if (status) {
123 			dev_err(&hdev->pdev->dev,
124 				"Query tqp stat fail, status = %d,queue = %d\n",
125 				status,	i);
126 			return status;
127 		}
128 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
129 			le32_to_cpu(desc.data[1]);
130 
131 		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
132 					     true);
133 
134 		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
135 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
136 		if (status) {
137 			dev_err(&hdev->pdev->dev,
138 				"Query tqp stat fail, status = %d,queue = %d\n",
139 				status, i);
140 			return status;
141 		}
142 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
143 			le32_to_cpu(desc.data[1]);
144 	}
145 
146 	return 0;
147 }
148 
149 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
150 {
151 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
152 	struct hclgevf_tqp *tqp;
153 	u64 *buff = data;
154 	int i;
155 
156 	for (i = 0; i < kinfo->num_tqps; i++) {
157 		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
158 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
159 	}
160 	for (i = 0; i < kinfo->num_tqps; i++) {
161 		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
162 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
163 	}
164 
165 	return buff;
166 }
167 
168 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
169 {
170 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
171 
172 	return kinfo->num_tqps * 2;
173 }
174 
175 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
176 {
177 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
178 	u8 *buff = data;
179 	int i;
180 
181 	for (i = 0; i < kinfo->num_tqps; i++) {
182 		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
183 						       struct hclgevf_tqp, q);
184 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
185 			 tqp->index);
186 		buff += ETH_GSTRING_LEN;
187 	}
188 
189 	for (i = 0; i < kinfo->num_tqps; i++) {
190 		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
191 						       struct hclgevf_tqp, q);
192 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
193 			 tqp->index);
194 		buff += ETH_GSTRING_LEN;
195 	}
196 
197 	return buff;
198 }
199 
200 static void hclgevf_update_stats(struct hnae3_handle *handle,
201 				 struct net_device_stats *net_stats)
202 {
203 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
204 	int status;
205 
206 	status = hclgevf_tqps_update_stats(handle);
207 	if (status)
208 		dev_err(&hdev->pdev->dev,
209 			"VF update of TQPS stats fail, status = %d.\n",
210 			status);
211 }
212 
213 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
214 {
215 	if (strset == ETH_SS_TEST)
216 		return -EOPNOTSUPP;
217 	else if (strset == ETH_SS_STATS)
218 		return hclgevf_tqps_get_sset_count(handle, strset);
219 
220 	return 0;
221 }
222 
223 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
224 				u8 *data)
225 {
226 	u8 *p = (char *)data;
227 
228 	if (strset == ETH_SS_STATS)
229 		p = hclgevf_tqps_get_strings(handle, p);
230 }
231 
232 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
233 {
234 	hclgevf_tqps_get_stats(handle, data);
235 }
236 
237 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
238 				   u8 subcode)
239 {
240 	if (msg) {
241 		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
242 		msg->code = code;
243 		msg->subcode = subcode;
244 	}
245 }
246 
247 static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
248 {
249 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
250 	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
251 	struct hclge_basic_info *basic_info;
252 	struct hclge_vf_to_pf_msg send_msg;
253 	unsigned long caps;
254 	int status;
255 
256 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
257 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
258 				      sizeof(resp_msg));
259 	if (status) {
260 		dev_err(&hdev->pdev->dev,
261 			"failed to get basic info from pf, ret = %d", status);
262 		return status;
263 	}
264 
265 	basic_info = (struct hclge_basic_info *)resp_msg;
266 
267 	hdev->hw_tc_map = basic_info->hw_tc_map;
268 	hdev->mbx_api_version = basic_info->mbx_api_version;
269 	caps = basic_info->pf_caps;
270 	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
271 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
272 
273 	return 0;
274 }
275 
276 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
277 {
278 	struct hnae3_handle *nic = &hdev->nic;
279 	struct hclge_vf_to_pf_msg send_msg;
280 	u8 resp_msg;
281 	int ret;
282 
283 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
284 			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
285 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
286 				   sizeof(u8));
287 	if (ret) {
288 		dev_err(&hdev->pdev->dev,
289 			"VF request to get port based vlan state failed %d",
290 			ret);
291 		return ret;
292 	}
293 
294 	nic->port_base_vlan_state = resp_msg;
295 
296 	return 0;
297 }
298 
299 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
300 {
301 #define HCLGEVF_TQPS_RSS_INFO_LEN	6
302 #define HCLGEVF_TQPS_ALLOC_OFFSET	0
303 #define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
304 #define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4
305 
306 	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
307 	struct hclge_vf_to_pf_msg send_msg;
308 	int status;
309 
310 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
311 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
312 				      HCLGEVF_TQPS_RSS_INFO_LEN);
313 	if (status) {
314 		dev_err(&hdev->pdev->dev,
315 			"VF request to get tqp info from PF failed %d",
316 			status);
317 		return status;
318 	}
319 
320 	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
321 	       sizeof(u16));
322 	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
323 	       sizeof(u16));
324 	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
325 	       sizeof(u16));
326 
327 	return 0;
328 }
329 
330 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
331 {
332 #define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
333 #define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
334 #define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2
335 
336 	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
337 	struct hclge_vf_to_pf_msg send_msg;
338 	int ret;
339 
340 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
341 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
342 				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
343 	if (ret) {
344 		dev_err(&hdev->pdev->dev,
345 			"VF request to get tqp depth info from PF failed %d",
346 			ret);
347 		return ret;
348 	}
349 
350 	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
351 	       sizeof(u16));
352 	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
353 	       sizeof(u16));
354 
355 	return 0;
356 }
357 
358 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
359 {
360 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
361 	struct hclge_vf_to_pf_msg send_msg;
362 	u16 qid_in_pf = 0;
363 	u8 resp_data[2];
364 	int ret;
365 
366 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
367 	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
368 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
369 				   sizeof(resp_data));
370 	if (!ret)
371 		qid_in_pf = *(u16 *)resp_data;
372 
373 	return qid_in_pf;
374 }
375 
376 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
377 {
378 	struct hclge_vf_to_pf_msg send_msg;
379 	u8 resp_msg[2];
380 	int ret;
381 
382 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
383 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
384 				   sizeof(resp_msg));
385 	if (ret) {
386 		dev_err(&hdev->pdev->dev,
387 			"VF request to get the pf port media type failed %d",
388 			ret);
389 		return ret;
390 	}
391 
392 	hdev->hw.mac.media_type = resp_msg[0];
393 	hdev->hw.mac.module_type = resp_msg[1];
394 
395 	return 0;
396 }
397 
398 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
399 {
400 	struct hclgevf_tqp *tqp;
401 	int i;
402 
403 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
404 				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
405 	if (!hdev->htqp)
406 		return -ENOMEM;
407 
408 	tqp = hdev->htqp;
409 
410 	for (i = 0; i < hdev->num_tqps; i++) {
411 		tqp->dev = &hdev->pdev->dev;
412 		tqp->index = i;
413 
414 		tqp->q.ae_algo = &ae_algovf;
415 		tqp->q.buf_size = hdev->rx_buf_len;
416 		tqp->q.tx_desc_num = hdev->num_tx_desc;
417 		tqp->q.rx_desc_num = hdev->num_rx_desc;
418 
419 		/* need an extended offset to configure queues >=
420 		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
421 		 */
422 		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
423 			tqp->q.io_base = hdev->hw.io_base +
424 					 HCLGEVF_TQP_REG_OFFSET +
425 					 i * HCLGEVF_TQP_REG_SIZE;
426 		else
427 			tqp->q.io_base = hdev->hw.io_base +
428 					 HCLGEVF_TQP_REG_OFFSET +
429 					 HCLGEVF_TQP_EXT_REG_OFFSET +
430 					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
431 					 HCLGEVF_TQP_REG_SIZE;
432 
433 		tqp++;
434 	}
435 
436 	return 0;
437 }
438 
439 static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
440 {
441 	struct hnae3_handle *nic = &hdev->nic;
442 	struct hnae3_knic_private_info *kinfo;
443 	u16 new_tqps = hdev->num_tqps;
444 	unsigned int i;
445 	u8 num_tc = 0;
446 
447 	kinfo = &nic->kinfo;
448 	kinfo->num_tx_desc = hdev->num_tx_desc;
449 	kinfo->num_rx_desc = hdev->num_rx_desc;
450 	kinfo->rx_buf_len = hdev->rx_buf_len;
451 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
452 		if (hdev->hw_tc_map & BIT(i))
453 			num_tc++;
454 
455 	num_tc = num_tc ? num_tc : 1;
456 	kinfo->tc_info.num_tc = num_tc;
457 	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
458 	new_tqps = kinfo->rss_size * num_tc;
459 	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
460 
461 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
462 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
463 	if (!kinfo->tqp)
464 		return -ENOMEM;
465 
466 	for (i = 0; i < kinfo->num_tqps; i++) {
467 		hdev->htqp[i].q.handle = &hdev->nic;
468 		hdev->htqp[i].q.tqp_index = i;
469 		kinfo->tqp[i] = &hdev->htqp[i].q;
470 	}
471 
472 	/* after init the max rss_size and tqps, adjust the default tqp numbers
473 	 * and rss size with the actual vector numbers
474 	 */
475 	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
476 	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
477 				kinfo->rss_size);
478 
479 	return 0;
480 }
481 
482 static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
483 {
484 	struct hclge_vf_to_pf_msg send_msg;
485 	int status;
486 
487 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
488 	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
489 	if (status)
490 		dev_err(&hdev->pdev->dev,
491 			"VF failed to fetch link status(%d) from PF", status);
492 }
493 
494 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
495 {
496 	struct hnae3_handle *rhandle = &hdev->roce;
497 	struct hnae3_handle *handle = &hdev->nic;
498 	struct hnae3_client *rclient;
499 	struct hnae3_client *client;
500 
501 	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
502 		return;
503 
504 	client = handle->client;
505 	rclient = hdev->roce_client;
506 
507 	link_state =
508 		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
509 	if (link_state != hdev->hw.mac.link) {
510 		hdev->hw.mac.link = link_state;
511 		client->ops->link_status_change(handle, !!link_state);
512 		if (rclient && rclient->ops->link_status_change)
513 			rclient->ops->link_status_change(rhandle, !!link_state);
514 	}
515 
516 	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
517 }
518 
519 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
520 {
521 #define HCLGEVF_ADVERTISING	0
522 #define HCLGEVF_SUPPORTED	1
523 
524 	struct hclge_vf_to_pf_msg send_msg;
525 
526 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
527 	send_msg.data[0] = HCLGEVF_ADVERTISING;
528 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
529 	send_msg.data[0] = HCLGEVF_SUPPORTED;
530 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
531 }
532 
533 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
534 {
535 	struct hnae3_handle *nic = &hdev->nic;
536 	int ret;
537 
538 	nic->ae_algo = &ae_algovf;
539 	nic->pdev = hdev->pdev;
540 	nic->numa_node_mask = hdev->numa_node_mask;
541 	nic->flags |= HNAE3_SUPPORT_VF;
542 	nic->kinfo.io_base = hdev->hw.io_base;
543 
544 	ret = hclgevf_knic_setup(hdev);
545 	if (ret)
546 		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
547 			ret);
548 	return ret;
549 }
550 
551 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
552 {
553 	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
554 		dev_warn(&hdev->pdev->dev,
555 			 "vector(vector_id %d) has been freed.\n", vector_id);
556 		return;
557 	}
558 
559 	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
560 	hdev->num_msi_left += 1;
561 	hdev->num_msi_used -= 1;
562 }
563 
564 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
565 			      struct hnae3_vector_info *vector_info)
566 {
567 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
568 	struct hnae3_vector_info *vector = vector_info;
569 	int alloc = 0;
570 	int i, j;
571 
572 	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
573 	vector_num = min(hdev->num_msi_left, vector_num);
574 
575 	for (j = 0; j < vector_num; j++) {
576 		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
577 			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
578 				vector->vector = pci_irq_vector(hdev->pdev, i);
579 				vector->io_addr = hdev->hw.io_base +
580 					HCLGEVF_VECTOR_REG_BASE +
581 					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
582 				hdev->vector_status[i] = 0;
583 				hdev->vector_irq[i] = vector->vector;
584 
585 				vector++;
586 				alloc++;
587 
588 				break;
589 			}
590 		}
591 	}
592 	hdev->num_msi_left -= alloc;
593 	hdev->num_msi_used += alloc;
594 
595 	return alloc;
596 }
597 
598 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
599 {
600 	int i;
601 
602 	for (i = 0; i < hdev->num_msi; i++)
603 		if (vector == hdev->vector_irq[i])
604 			return i;
605 
606 	return -EINVAL;
607 }
608 
609 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
610 				    const u8 hfunc, const u8 *key)
611 {
612 	struct hclgevf_rss_config_cmd *req;
613 	unsigned int key_offset = 0;
614 	struct hclge_desc desc;
615 	int key_counts;
616 	int key_size;
617 	int ret;
618 
619 	key_counts = HCLGEVF_RSS_KEY_SIZE;
620 	req = (struct hclgevf_rss_config_cmd *)desc.data;
621 
622 	while (key_counts) {
623 		hclgevf_cmd_setup_basic_desc(&desc,
624 					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
625 					     false);
626 
627 		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
628 		req->hash_config |=
629 			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
630 
631 		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
632 		memcpy(req->hash_key,
633 		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
634 
635 		key_counts -= key_size;
636 		key_offset++;
637 		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
638 		if (ret) {
639 			dev_err(&hdev->pdev->dev,
640 				"Configure RSS config fail, status = %d\n",
641 				ret);
642 			return ret;
643 		}
644 	}
645 
646 	return 0;
647 }
648 
649 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
650 {
651 	return HCLGEVF_RSS_KEY_SIZE;
652 }
653 
654 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
655 {
656 	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
657 	struct hclgevf_rss_indirection_table_cmd *req;
658 	struct hclge_desc desc;
659 	int rss_cfg_tbl_num;
660 	int status;
661 	int i, j;
662 
663 	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
664 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
665 			  HCLGEVF_RSS_CFG_TBL_SIZE;
666 
667 	for (i = 0; i < rss_cfg_tbl_num; i++) {
668 		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
669 					     false);
670 		req->start_table_index =
671 			cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
672 		req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
673 		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
674 			req->rss_result[j] =
675 				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
676 
677 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
678 		if (status) {
679 			dev_err(&hdev->pdev->dev,
680 				"VF failed(=%d) to set RSS indirection table\n",
681 				status);
682 			return status;
683 		}
684 	}
685 
686 	return 0;
687 }
688 
689 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev,  u16 rss_size)
690 {
691 	struct hclgevf_rss_tc_mode_cmd *req;
692 	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
693 	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
694 	u16 tc_size[HCLGEVF_MAX_TC_NUM];
695 	struct hclge_desc desc;
696 	u16 roundup_size;
697 	unsigned int i;
698 	int status;
699 
700 	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
701 
702 	roundup_size = roundup_pow_of_two(rss_size);
703 	roundup_size = ilog2(roundup_size);
704 
705 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
706 		tc_valid[i] = 1;
707 		tc_size[i] = roundup_size;
708 		tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
709 	}
710 
711 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
712 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
713 		u16 mode = 0;
714 
715 		hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
716 			      (tc_valid[i] & 0x1));
717 		hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
718 				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
719 		hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
720 			      tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
721 			      0x1);
722 		hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
723 				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
724 
725 		req->rss_tc_mode[i] = cpu_to_le16(mode);
726 	}
727 	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
728 	if (status)
729 		dev_err(&hdev->pdev->dev,
730 			"VF failed(=%d) to set rss tc mode\n", status);
731 
732 	return status;
733 }
734 
735 /* for revision 0x20, vf shared the same rss config with pf */
736 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
737 {
738 #define HCLGEVF_RSS_MBX_RESP_LEN	8
739 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
740 	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
741 	struct hclge_vf_to_pf_msg send_msg;
742 	u16 msg_num, hash_key_index;
743 	u8 index;
744 	int ret;
745 
746 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
747 	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
748 			HCLGEVF_RSS_MBX_RESP_LEN;
749 	for (index = 0; index < msg_num; index++) {
750 		send_msg.data[0] = index;
751 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
752 					   HCLGEVF_RSS_MBX_RESP_LEN);
753 		if (ret) {
754 			dev_err(&hdev->pdev->dev,
755 				"VF get rss hash key from PF failed, ret=%d",
756 				ret);
757 			return ret;
758 		}
759 
760 		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
761 		if (index == msg_num - 1)
762 			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
763 			       &resp_msg[0],
764 			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
765 		else
766 			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
767 			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
768 	}
769 
770 	return 0;
771 }
772 
773 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
774 			   u8 *hfunc)
775 {
776 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
777 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
778 	int i, ret;
779 
780 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
781 		/* Get hash algorithm */
782 		if (hfunc) {
783 			switch (rss_cfg->hash_algo) {
784 			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
785 				*hfunc = ETH_RSS_HASH_TOP;
786 				break;
787 			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
788 				*hfunc = ETH_RSS_HASH_XOR;
789 				break;
790 			default:
791 				*hfunc = ETH_RSS_HASH_UNKNOWN;
792 				break;
793 			}
794 		}
795 
796 		/* Get the RSS Key required by the user */
797 		if (key)
798 			memcpy(key, rss_cfg->rss_hash_key,
799 			       HCLGEVF_RSS_KEY_SIZE);
800 	} else {
801 		if (hfunc)
802 			*hfunc = ETH_RSS_HASH_TOP;
803 		if (key) {
804 			ret = hclgevf_get_rss_hash_key(hdev);
805 			if (ret)
806 				return ret;
807 			memcpy(key, rss_cfg->rss_hash_key,
808 			       HCLGEVF_RSS_KEY_SIZE);
809 		}
810 	}
811 
812 	if (indir)
813 		for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
814 			indir[i] = rss_cfg->rss_indirection_tbl[i];
815 
816 	return 0;
817 }
818 
819 static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
820 				   u8 *hash_algo)
821 {
822 	switch (hfunc) {
823 	case ETH_RSS_HASH_TOP:
824 		*hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
825 		return 0;
826 	case ETH_RSS_HASH_XOR:
827 		*hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
828 		return 0;
829 	case ETH_RSS_HASH_NO_CHANGE:
830 		*hash_algo = hdev->rss_cfg.hash_algo;
831 		return 0;
832 	default:
833 		return -EINVAL;
834 	}
835 }
836 
837 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
838 			   const u8 *key, const u8 hfunc)
839 {
840 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
841 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
842 	u8 hash_algo;
843 	int ret, i;
844 
845 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
846 		ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
847 		if (ret)
848 			return ret;
849 
850 		/* Set the RSS Hash Key if specififed by the user */
851 		if (key) {
852 			ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
853 			if (ret) {
854 				dev_err(&hdev->pdev->dev,
855 					"invalid hfunc type %u\n", hfunc);
856 				return ret;
857 			}
858 
859 			/* Update the shadow RSS key with user specified qids */
860 			memcpy(rss_cfg->rss_hash_key, key,
861 			       HCLGEVF_RSS_KEY_SIZE);
862 		} else {
863 			ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
864 						       rss_cfg->rss_hash_key);
865 			if (ret)
866 				return ret;
867 		}
868 		rss_cfg->hash_algo = hash_algo;
869 	}
870 
871 	/* update the shadow RSS table with user specified qids */
872 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
873 		rss_cfg->rss_indirection_tbl[i] = indir[i];
874 
875 	/* update the hardware */
876 	return hclgevf_set_rss_indir_table(hdev);
877 }
878 
879 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
880 {
881 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
882 
883 	if (nfc->data & RXH_L4_B_2_3)
884 		hash_sets |= HCLGEVF_D_PORT_BIT;
885 	else
886 		hash_sets &= ~HCLGEVF_D_PORT_BIT;
887 
888 	if (nfc->data & RXH_IP_SRC)
889 		hash_sets |= HCLGEVF_S_IP_BIT;
890 	else
891 		hash_sets &= ~HCLGEVF_S_IP_BIT;
892 
893 	if (nfc->data & RXH_IP_DST)
894 		hash_sets |= HCLGEVF_D_IP_BIT;
895 	else
896 		hash_sets &= ~HCLGEVF_D_IP_BIT;
897 
898 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
899 		hash_sets |= HCLGEVF_V_TAG_BIT;
900 
901 	return hash_sets;
902 }
903 
904 static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
905 				      struct ethtool_rxnfc *nfc,
906 				      struct hclgevf_rss_input_tuple_cmd *req)
907 {
908 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
909 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
910 	u8 tuple_sets;
911 
912 	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
913 	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
914 	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
915 	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
916 	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
917 	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
918 	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
919 	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
920 
921 	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
922 	switch (nfc->flow_type) {
923 	case TCP_V4_FLOW:
924 		req->ipv4_tcp_en = tuple_sets;
925 		break;
926 	case TCP_V6_FLOW:
927 		req->ipv6_tcp_en = tuple_sets;
928 		break;
929 	case UDP_V4_FLOW:
930 		req->ipv4_udp_en = tuple_sets;
931 		break;
932 	case UDP_V6_FLOW:
933 		req->ipv6_udp_en = tuple_sets;
934 		break;
935 	case SCTP_V4_FLOW:
936 		req->ipv4_sctp_en = tuple_sets;
937 		break;
938 	case SCTP_V6_FLOW:
939 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
940 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
941 			return -EINVAL;
942 
943 		req->ipv6_sctp_en = tuple_sets;
944 		break;
945 	case IPV4_FLOW:
946 		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
947 		break;
948 	case IPV6_FLOW:
949 		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
950 		break;
951 	default:
952 		return -EINVAL;
953 	}
954 
955 	return 0;
956 }
957 
958 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
959 				 struct ethtool_rxnfc *nfc)
960 {
961 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
962 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
963 	struct hclgevf_rss_input_tuple_cmd *req;
964 	struct hclge_desc desc;
965 	int ret;
966 
967 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
968 		return -EOPNOTSUPP;
969 
970 	if (nfc->data &
971 	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
972 		return -EINVAL;
973 
974 	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
975 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
976 
977 	ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
978 	if (ret) {
979 		dev_err(&hdev->pdev->dev,
980 			"failed to init rss tuple cmd, ret = %d\n", ret);
981 		return ret;
982 	}
983 
984 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
985 	if (ret) {
986 		dev_err(&hdev->pdev->dev,
987 			"Set rss tuple fail, status = %d\n", ret);
988 		return ret;
989 	}
990 
991 	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
992 	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
993 	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
994 	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
995 	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
996 	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
997 	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
998 	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
999 	return 0;
1000 }
1001 
1002 static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
1003 					      int flow_type, u8 *tuple_sets)
1004 {
1005 	switch (flow_type) {
1006 	case TCP_V4_FLOW:
1007 		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
1008 		break;
1009 	case UDP_V4_FLOW:
1010 		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
1011 		break;
1012 	case TCP_V6_FLOW:
1013 		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
1014 		break;
1015 	case UDP_V6_FLOW:
1016 		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
1017 		break;
1018 	case SCTP_V4_FLOW:
1019 		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
1020 		break;
1021 	case SCTP_V6_FLOW:
1022 		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
1023 		break;
1024 	case IPV4_FLOW:
1025 	case IPV6_FLOW:
1026 		*tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
1027 		break;
1028 	default:
1029 		return -EINVAL;
1030 	}
1031 
1032 	return 0;
1033 }
1034 
1035 static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
1036 {
1037 	u64 tuple_data = 0;
1038 
1039 	if (tuple_sets & HCLGEVF_D_PORT_BIT)
1040 		tuple_data |= RXH_L4_B_2_3;
1041 	if (tuple_sets & HCLGEVF_S_PORT_BIT)
1042 		tuple_data |= RXH_L4_B_0_1;
1043 	if (tuple_sets & HCLGEVF_D_IP_BIT)
1044 		tuple_data |= RXH_IP_DST;
1045 	if (tuple_sets & HCLGEVF_S_IP_BIT)
1046 		tuple_data |= RXH_IP_SRC;
1047 
1048 	return tuple_data;
1049 }
1050 
1051 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
1052 				 struct ethtool_rxnfc *nfc)
1053 {
1054 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1055 	u8 tuple_sets;
1056 	int ret;
1057 
1058 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
1059 		return -EOPNOTSUPP;
1060 
1061 	nfc->data = 0;
1062 
1063 	ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
1064 						 &tuple_sets);
1065 	if (ret || !tuple_sets)
1066 		return ret;
1067 
1068 	nfc->data = hclgevf_convert_rss_tuple(tuple_sets);
1069 
1070 	return 0;
1071 }
1072 
1073 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
1074 				       struct hclgevf_rss_cfg *rss_cfg)
1075 {
1076 	struct hclgevf_rss_input_tuple_cmd *req;
1077 	struct hclge_desc desc;
1078 	int ret;
1079 
1080 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
1081 
1082 	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
1083 
1084 	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
1085 	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
1086 	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
1087 	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
1088 	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
1089 	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
1090 	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
1091 	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
1092 
1093 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
1094 	if (ret)
1095 		dev_err(&hdev->pdev->dev,
1096 			"Configure rss input fail, status = %d\n", ret);
1097 	return ret;
1098 }
1099 
1100 static int hclgevf_get_tc_size(struct hnae3_handle *handle)
1101 {
1102 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1103 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
1104 
1105 	return rss_cfg->rss_size;
1106 }
1107 
1108 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
1109 				       int vector_id,
1110 				       struct hnae3_ring_chain_node *ring_chain)
1111 {
1112 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1113 	struct hclge_vf_to_pf_msg send_msg;
1114 	struct hnae3_ring_chain_node *node;
1115 	int status;
1116 	int i = 0;
1117 
1118 	memset(&send_msg, 0, sizeof(send_msg));
1119 	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
1120 		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
1121 	send_msg.vector_id = vector_id;
1122 
1123 	for (node = ring_chain; node; node = node->next) {
1124 		send_msg.param[i].ring_type =
1125 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
1126 
1127 		send_msg.param[i].tqp_index = node->tqp_index;
1128 		send_msg.param[i].int_gl_index =
1129 					hnae3_get_field(node->int_gl_idx,
1130 							HNAE3_RING_GL_IDX_M,
1131 							HNAE3_RING_GL_IDX_S);
1132 
1133 		i++;
1134 		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
1135 			send_msg.ring_num = i;
1136 
1137 			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
1138 						      NULL, 0);
1139 			if (status) {
1140 				dev_err(&hdev->pdev->dev,
1141 					"Map TQP fail, status is %d.\n",
1142 					status);
1143 				return status;
1144 			}
1145 			i = 0;
1146 		}
1147 	}
1148 
1149 	return 0;
1150 }
1151 
1152 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
1153 				      struct hnae3_ring_chain_node *ring_chain)
1154 {
1155 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1156 	int vector_id;
1157 
1158 	vector_id = hclgevf_get_vector_index(hdev, vector);
1159 	if (vector_id < 0) {
1160 		dev_err(&handle->pdev->dev,
1161 			"Get vector index fail. ret =%d\n", vector_id);
1162 		return vector_id;
1163 	}
1164 
1165 	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
1166 }
1167 
1168 static int hclgevf_unmap_ring_from_vector(
1169 				struct hnae3_handle *handle,
1170 				int vector,
1171 				struct hnae3_ring_chain_node *ring_chain)
1172 {
1173 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1174 	int ret, vector_id;
1175 
1176 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
1177 		return 0;
1178 
1179 	vector_id = hclgevf_get_vector_index(hdev, vector);
1180 	if (vector_id < 0) {
1181 		dev_err(&handle->pdev->dev,
1182 			"Get vector index fail. ret =%d\n", vector_id);
1183 		return vector_id;
1184 	}
1185 
1186 	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
1187 	if (ret)
1188 		dev_err(&handle->pdev->dev,
1189 			"Unmap ring from vector fail. vector=%d, ret =%d\n",
1190 			vector_id,
1191 			ret);
1192 
1193 	return ret;
1194 }
1195 
1196 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
1197 {
1198 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1199 	int vector_id;
1200 
1201 	vector_id = hclgevf_get_vector_index(hdev, vector);
1202 	if (vector_id < 0) {
1203 		dev_err(&handle->pdev->dev,
1204 			"hclgevf_put_vector get vector index fail. ret =%d\n",
1205 			vector_id);
1206 		return vector_id;
1207 	}
1208 
1209 	hclgevf_free_vector(hdev, vector_id);
1210 
1211 	return 0;
1212 }
1213 
1214 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
1215 					bool en_uc_pmc, bool en_mc_pmc,
1216 					bool en_bc_pmc)
1217 {
1218 	struct hnae3_handle *handle = &hdev->nic;
1219 	struct hclge_vf_to_pf_msg send_msg;
1220 	int ret;
1221 
1222 	memset(&send_msg, 0, sizeof(send_msg));
1223 	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
1224 	send_msg.en_bc = en_bc_pmc ? 1 : 0;
1225 	send_msg.en_uc = en_uc_pmc ? 1 : 0;
1226 	send_msg.en_mc = en_mc_pmc ? 1 : 0;
1227 	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
1228 					     &handle->priv_flags) ? 1 : 0;
1229 
1230 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1231 	if (ret)
1232 		dev_err(&hdev->pdev->dev,
1233 			"Set promisc mode fail, status is %d.\n", ret);
1234 
1235 	return ret;
1236 }
1237 
1238 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
1239 				    bool en_mc_pmc)
1240 {
1241 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1242 	bool en_bc_pmc;
1243 
1244 	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
1245 
1246 	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
1247 					    en_bc_pmc);
1248 }
1249 
1250 static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
1251 {
1252 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1253 
1254 	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
1255 	hclgevf_task_schedule(hdev, 0);
1256 }
1257 
1258 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
1259 {
1260 	struct hnae3_handle *handle = &hdev->nic;
1261 	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
1262 	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
1263 	int ret;
1264 
1265 	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
1266 		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
1267 		if (!ret)
1268 			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
1269 	}
1270 }
1271 
1272 static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
1273 				       u16 stream_id, bool enable)
1274 {
1275 	struct hclgevf_cfg_com_tqp_queue_cmd *req;
1276 	struct hclge_desc desc;
1277 
1278 	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
1279 
1280 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
1281 				     false);
1282 	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
1283 	req->stream_id = cpu_to_le16(stream_id);
1284 	if (enable)
1285 		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
1286 
1287 	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
1288 }
1289 
1290 static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
1291 {
1292 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1293 	int ret;
1294 	u16 i;
1295 
1296 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
1297 		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
1298 		if (ret)
1299 			return ret;
1300 	}
1301 
1302 	return 0;
1303 }
1304 
1305 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
1306 {
1307 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
1308 	struct hclgevf_tqp *tqp;
1309 	int i;
1310 
1311 	for (i = 0; i < kinfo->num_tqps; i++) {
1312 		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
1313 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
1314 	}
1315 }
1316 
1317 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
1318 {
1319 	struct hclge_vf_to_pf_msg send_msg;
1320 	u8 host_mac[ETH_ALEN];
1321 	int status;
1322 
1323 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
1324 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
1325 				      ETH_ALEN);
1326 	if (status) {
1327 		dev_err(&hdev->pdev->dev,
1328 			"fail to get VF MAC from host %d", status);
1329 		return status;
1330 	}
1331 
1332 	ether_addr_copy(p, host_mac);
1333 
1334 	return 0;
1335 }
1336 
1337 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
1338 {
1339 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1340 	u8 host_mac_addr[ETH_ALEN];
1341 
1342 	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
1343 		return;
1344 
1345 	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
1346 	if (hdev->has_pf_mac)
1347 		ether_addr_copy(p, host_mac_addr);
1348 	else
1349 		ether_addr_copy(p, hdev->hw.mac.mac_addr);
1350 }
1351 
1352 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
1353 				bool is_first)
1354 {
1355 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1356 	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
1357 	struct hclge_vf_to_pf_msg send_msg;
1358 	u8 *new_mac_addr = (u8 *)p;
1359 	int status;
1360 
1361 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
1362 	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
1363 	ether_addr_copy(send_msg.data, new_mac_addr);
1364 	if (is_first && !hdev->has_pf_mac)
1365 		eth_zero_addr(&send_msg.data[ETH_ALEN]);
1366 	else
1367 		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
1368 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1369 	if (!status)
1370 		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
1371 
1372 	return status;
1373 }
1374 
1375 static struct hclgevf_mac_addr_node *
1376 hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
1377 {
1378 	struct hclgevf_mac_addr_node *mac_node, *tmp;
1379 
1380 	list_for_each_entry_safe(mac_node, tmp, list, node)
1381 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
1382 			return mac_node;
1383 
1384 	return NULL;
1385 }
1386 
1387 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
1388 				    enum HCLGEVF_MAC_NODE_STATE state)
1389 {
1390 	switch (state) {
1391 	/* from set_rx_mode or tmp_add_list */
1392 	case HCLGEVF_MAC_TO_ADD:
1393 		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
1394 			mac_node->state = HCLGEVF_MAC_ACTIVE;
1395 		break;
1396 	/* only from set_rx_mode */
1397 	case HCLGEVF_MAC_TO_DEL:
1398 		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
1399 			list_del(&mac_node->node);
1400 			kfree(mac_node);
1401 		} else {
1402 			mac_node->state = HCLGEVF_MAC_TO_DEL;
1403 		}
1404 		break;
1405 	/* only from tmp_add_list, the mac_node->state won't be
1406 	 * HCLGEVF_MAC_ACTIVE
1407 	 */
1408 	case HCLGEVF_MAC_ACTIVE:
1409 		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
1410 			mac_node->state = HCLGEVF_MAC_ACTIVE;
1411 		break;
1412 	}
1413 }
1414 
1415 static int hclgevf_update_mac_list(struct hnae3_handle *handle,
1416 				   enum HCLGEVF_MAC_NODE_STATE state,
1417 				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
1418 				   const unsigned char *addr)
1419 {
1420 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1421 	struct hclgevf_mac_addr_node *mac_node;
1422 	struct list_head *list;
1423 
1424 	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
1425 	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
1426 
1427 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1428 
1429 	/* if the mac addr is already in the mac list, no need to add a new
1430 	 * one into it, just check the mac addr state, convert it to a new
1431 	 * new state, or just remove it, or do nothing.
1432 	 */
1433 	mac_node = hclgevf_find_mac_node(list, addr);
1434 	if (mac_node) {
1435 		hclgevf_update_mac_node(mac_node, state);
1436 		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1437 		return 0;
1438 	}
1439 	/* if this address is never added, unnecessary to delete */
1440 	if (state == HCLGEVF_MAC_TO_DEL) {
1441 		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1442 		return -ENOENT;
1443 	}
1444 
1445 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
1446 	if (!mac_node) {
1447 		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1448 		return -ENOMEM;
1449 	}
1450 
1451 	mac_node->state = state;
1452 	ether_addr_copy(mac_node->mac_addr, addr);
1453 	list_add_tail(&mac_node->node, list);
1454 
1455 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1456 	return 0;
1457 }
1458 
1459 static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
1460 			       const unsigned char *addr)
1461 {
1462 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
1463 				       HCLGEVF_MAC_ADDR_UC, addr);
1464 }
1465 
1466 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
1467 			      const unsigned char *addr)
1468 {
1469 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
1470 				       HCLGEVF_MAC_ADDR_UC, addr);
1471 }
1472 
1473 static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
1474 			       const unsigned char *addr)
1475 {
1476 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
1477 				       HCLGEVF_MAC_ADDR_MC, addr);
1478 }
1479 
1480 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
1481 			      const unsigned char *addr)
1482 {
1483 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
1484 				       HCLGEVF_MAC_ADDR_MC, addr);
1485 }
1486 
1487 static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
1488 				    struct hclgevf_mac_addr_node *mac_node,
1489 				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
1490 {
1491 	struct hclge_vf_to_pf_msg send_msg;
1492 	u8 code, subcode;
1493 
1494 	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
1495 		code = HCLGE_MBX_SET_UNICAST;
1496 		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
1497 			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
1498 		else
1499 			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
1500 	} else {
1501 		code = HCLGE_MBX_SET_MULTICAST;
1502 		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
1503 			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
1504 		else
1505 			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
1506 	}
1507 
1508 	hclgevf_build_send_msg(&send_msg, code, subcode);
1509 	ether_addr_copy(send_msg.data, mac_node->mac_addr);
1510 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1511 }
1512 
1513 static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
1514 				    struct list_head *list,
1515 				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
1516 {
1517 	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
1518 	struct hclgevf_mac_addr_node *mac_node, *tmp;
1519 	int ret;
1520 
1521 	list_for_each_entry_safe(mac_node, tmp, list, node) {
1522 		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
1523 		if  (ret) {
1524 			hnae3_format_mac_addr(format_mac_addr,
1525 					      mac_node->mac_addr);
1526 			dev_err(&hdev->pdev->dev,
1527 				"failed to configure mac %s, state = %d, ret = %d\n",
1528 				format_mac_addr, mac_node->state, ret);
1529 			return;
1530 		}
1531 		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
1532 			mac_node->state = HCLGEVF_MAC_ACTIVE;
1533 		} else {
1534 			list_del(&mac_node->node);
1535 			kfree(mac_node);
1536 		}
1537 	}
1538 }
1539 
1540 static void hclgevf_sync_from_add_list(struct list_head *add_list,
1541 				       struct list_head *mac_list)
1542 {
1543 	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
1544 
1545 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
1546 		/* if the mac address from tmp_add_list is not in the
1547 		 * uc/mc_mac_list, it means have received a TO_DEL request
1548 		 * during the time window of sending mac config request to PF
1549 		 * If mac_node state is ACTIVE, then change its state to TO_DEL,
1550 		 * then it will be removed at next time. If is TO_ADD, it means
1551 		 * send TO_ADD request failed, so just remove the mac node.
1552 		 */
1553 		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
1554 		if (new_node) {
1555 			hclgevf_update_mac_node(new_node, mac_node->state);
1556 			list_del(&mac_node->node);
1557 			kfree(mac_node);
1558 		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
1559 			mac_node->state = HCLGEVF_MAC_TO_DEL;
1560 			list_move_tail(&mac_node->node, mac_list);
1561 		} else {
1562 			list_del(&mac_node->node);
1563 			kfree(mac_node);
1564 		}
1565 	}
1566 }
1567 
1568 static void hclgevf_sync_from_del_list(struct list_head *del_list,
1569 				       struct list_head *mac_list)
1570 {
1571 	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
1572 
1573 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
1574 		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
1575 		if (new_node) {
1576 			/* If the mac addr is exist in the mac list, it means
1577 			 * received a new request TO_ADD during the time window
1578 			 * of sending mac addr configurrequest to PF, so just
1579 			 * change the mac state to ACTIVE.
1580 			 */
1581 			new_node->state = HCLGEVF_MAC_ACTIVE;
1582 			list_del(&mac_node->node);
1583 			kfree(mac_node);
1584 		} else {
1585 			list_move_tail(&mac_node->node, mac_list);
1586 		}
1587 	}
1588 }
1589 
1590 static void hclgevf_clear_list(struct list_head *list)
1591 {
1592 	struct hclgevf_mac_addr_node *mac_node, *tmp;
1593 
1594 	list_for_each_entry_safe(mac_node, tmp, list, node) {
1595 		list_del(&mac_node->node);
1596 		kfree(mac_node);
1597 	}
1598 }
1599 
1600 static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
1601 				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
1602 {
1603 	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
1604 	struct list_head tmp_add_list, tmp_del_list;
1605 	struct list_head *list;
1606 
1607 	INIT_LIST_HEAD(&tmp_add_list);
1608 	INIT_LIST_HEAD(&tmp_del_list);
1609 
1610 	/* move the mac addr to the tmp_add_list and tmp_del_list, then
1611 	 * we can add/delete these mac addr outside the spin lock
1612 	 */
1613 	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
1614 		&hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
1615 
1616 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1617 
1618 	list_for_each_entry_safe(mac_node, tmp, list, node) {
1619 		switch (mac_node->state) {
1620 		case HCLGEVF_MAC_TO_DEL:
1621 			list_move_tail(&mac_node->node, &tmp_del_list);
1622 			break;
1623 		case HCLGEVF_MAC_TO_ADD:
1624 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
1625 			if (!new_node)
1626 				goto stop_traverse;
1627 
1628 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
1629 			new_node->state = mac_node->state;
1630 			list_add_tail(&new_node->node, &tmp_add_list);
1631 			break;
1632 		default:
1633 			break;
1634 		}
1635 	}
1636 
1637 stop_traverse:
1638 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1639 
1640 	/* delete first, in order to get max mac table space for adding */
1641 	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
1642 	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
1643 
1644 	/* if some mac addresses were added/deleted fail, move back to the
1645 	 * mac_list, and retry at next time.
1646 	 */
1647 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1648 
1649 	hclgevf_sync_from_del_list(&tmp_del_list, list);
1650 	hclgevf_sync_from_add_list(&tmp_add_list, list);
1651 
1652 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1653 }
1654 
1655 static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
1656 {
1657 	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
1658 	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
1659 }
1660 
1661 static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
1662 {
1663 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1664 
1665 	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
1666 	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
1667 
1668 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1669 }
1670 
1671 static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
1672 {
1673 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1674 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
1675 	struct hclge_vf_to_pf_msg send_msg;
1676 
1677 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
1678 		return -EOPNOTSUPP;
1679 
1680 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
1681 			       HCLGE_MBX_ENABLE_VLAN_FILTER);
1682 	send_msg.data[0] = enable ? 1 : 0;
1683 
1684 	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1685 }
1686 
1687 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
1688 				   __be16 proto, u16 vlan_id,
1689 				   bool is_kill)
1690 {
1691 #define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
1692 #define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
1693 #define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3
1694 
1695 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1696 	struct hclge_vf_to_pf_msg send_msg;
1697 	int ret;
1698 
1699 	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
1700 		return -EINVAL;
1701 
1702 	if (proto != htons(ETH_P_8021Q))
1703 		return -EPROTONOSUPPORT;
1704 
1705 	/* When device is resetting or reset failed, firmware is unable to
1706 	 * handle mailbox. Just record the vlan id, and remove it after
1707 	 * reset finished.
1708 	 */
1709 	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
1710 	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
1711 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
1712 		return -EBUSY;
1713 	}
1714 
1715 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
1716 			       HCLGE_MBX_VLAN_FILTER);
1717 	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
1718 	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
1719 	       sizeof(vlan_id));
1720 	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
1721 	       sizeof(proto));
1722 	/* when remove hw vlan filter failed, record the vlan id,
1723 	 * and try to remove it from hw later, to be consistence
1724 	 * with stack.
1725 	 */
1726 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1727 	if (is_kill && ret)
1728 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
1729 
1730 	return ret;
1731 }
1732 
1733 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
1734 {
1735 #define HCLGEVF_MAX_SYNC_COUNT	60
1736 	struct hnae3_handle *handle = &hdev->nic;
1737 	int ret, sync_cnt = 0;
1738 	u16 vlan_id;
1739 
1740 	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
1741 	while (vlan_id != VLAN_N_VID) {
1742 		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
1743 					      vlan_id, true);
1744 		if (ret)
1745 			return;
1746 
1747 		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
1748 		sync_cnt++;
1749 		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
1750 			return;
1751 
1752 		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
1753 	}
1754 }
1755 
1756 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
1757 {
1758 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1759 	struct hclge_vf_to_pf_msg send_msg;
1760 
1761 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
1762 			       HCLGE_MBX_VLAN_RX_OFF_CFG);
1763 	send_msg.data[0] = enable ? 1 : 0;
1764 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1765 }
1766 
1767 static int hclgevf_reset_tqp(struct hnae3_handle *handle)
1768 {
1769 #define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
1770 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1771 	struct hclge_vf_to_pf_msg send_msg;
1772 	u8 return_status = 0;
1773 	int ret;
1774 	u16 i;
1775 
1776 	/* disable vf queue before send queue reset msg to PF */
1777 	ret = hclgevf_tqp_enable(handle, false);
1778 	if (ret) {
1779 		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
1780 			ret);
1781 		return ret;
1782 	}
1783 
1784 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
1785 
1786 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
1787 				   sizeof(return_status));
1788 	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
1789 		return ret;
1790 
1791 	for (i = 1; i < handle->kinfo.num_tqps; i++) {
1792 		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
1793 		memcpy(send_msg.data, &i, sizeof(i));
1794 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1795 		if (ret)
1796 			return ret;
1797 	}
1798 
1799 	return 0;
1800 }
1801 
1802 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
1803 {
1804 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1805 	struct hclge_vf_to_pf_msg send_msg;
1806 
1807 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
1808 	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
1809 	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1810 }
1811 
1812 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
1813 				 enum hnae3_reset_notify_type type)
1814 {
1815 	struct hnae3_client *client = hdev->nic_client;
1816 	struct hnae3_handle *handle = &hdev->nic;
1817 	int ret;
1818 
1819 	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
1820 	    !client)
1821 		return 0;
1822 
1823 	if (!client->ops->reset_notify)
1824 		return -EOPNOTSUPP;
1825 
1826 	ret = client->ops->reset_notify(handle, type);
1827 	if (ret)
1828 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
1829 			type, ret);
1830 
1831 	return ret;
1832 }
1833 
1834 static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
1835 				      enum hnae3_reset_notify_type type)
1836 {
1837 	struct hnae3_client *client = hdev->roce_client;
1838 	struct hnae3_handle *handle = &hdev->roce;
1839 	int ret;
1840 
1841 	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
1842 		return 0;
1843 
1844 	if (!client->ops->reset_notify)
1845 		return -EOPNOTSUPP;
1846 
1847 	ret = client->ops->reset_notify(handle, type);
1848 	if (ret)
1849 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
1850 			type, ret);
1851 	return ret;
1852 }
1853 
1854 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
1855 {
1856 #define HCLGEVF_RESET_WAIT_US	20000
1857 #define HCLGEVF_RESET_WAIT_CNT	2000
1858 #define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
1859 	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
1860 
1861 	u32 val;
1862 	int ret;
1863 
1864 	if (hdev->reset_type == HNAE3_VF_RESET)
1865 		ret = readl_poll_timeout(hdev->hw.io_base +
1866 					 HCLGEVF_VF_RST_ING, val,
1867 					 !(val & HCLGEVF_VF_RST_ING_BIT),
1868 					 HCLGEVF_RESET_WAIT_US,
1869 					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
1870 	else
1871 		ret = readl_poll_timeout(hdev->hw.io_base +
1872 					 HCLGEVF_RST_ING, val,
1873 					 !(val & HCLGEVF_RST_ING_BITS),
1874 					 HCLGEVF_RESET_WAIT_US,
1875 					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
1876 
1877 	/* hardware completion status should be available by this time */
1878 	if (ret) {
1879 		dev_err(&hdev->pdev->dev,
1880 			"couldn't get reset done status from h/w, timeout!\n");
1881 		return ret;
1882 	}
1883 
1884 	/* we will wait a bit more to let reset of the stack to complete. This
1885 	 * might happen in case reset assertion was made by PF. Yes, this also
1886 	 * means we might end up waiting bit more even for VF reset.
1887 	 */
1888 	msleep(5000);
1889 
1890 	return 0;
1891 }
1892 
1893 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
1894 {
1895 	u32 reg_val;
1896 
1897 	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
1898 	if (enable)
1899 		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
1900 	else
1901 		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
1902 
1903 	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
1904 			  reg_val);
1905 }
1906 
1907 static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
1908 {
1909 	int ret;
1910 
1911 	/* uninitialize the nic client */
1912 	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1913 	if (ret)
1914 		return ret;
1915 
1916 	/* re-initialize the hclge device */
1917 	ret = hclgevf_reset_hdev(hdev);
1918 	if (ret) {
1919 		dev_err(&hdev->pdev->dev,
1920 			"hclge device re-init failed, VF is disabled!\n");
1921 		return ret;
1922 	}
1923 
1924 	/* bring up the nic client again */
1925 	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1926 	if (ret)
1927 		return ret;
1928 
1929 	/* clear handshake status with IMP */
1930 	hclgevf_reset_handshake(hdev, false);
1931 
1932 	/* bring up the nic to enable TX/RX again */
1933 	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1934 }
1935 
1936 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
1937 {
1938 #define HCLGEVF_RESET_SYNC_TIME 100
1939 
1940 	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
1941 		struct hclge_vf_to_pf_msg send_msg;
1942 		int ret;
1943 
1944 		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
1945 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1946 		if (ret) {
1947 			dev_err(&hdev->pdev->dev,
1948 				"failed to assert VF reset, ret = %d\n", ret);
1949 			return ret;
1950 		}
1951 		hdev->rst_stats.vf_func_rst_cnt++;
1952 	}
1953 
1954 	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
1955 	/* inform hardware that preparatory work is done */
1956 	msleep(HCLGEVF_RESET_SYNC_TIME);
1957 	hclgevf_reset_handshake(hdev, true);
1958 	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
1959 		 hdev->reset_type);
1960 
1961 	return 0;
1962 }
1963 
1964 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
1965 {
1966 	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
1967 		 hdev->rst_stats.vf_func_rst_cnt);
1968 	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
1969 		 hdev->rst_stats.flr_rst_cnt);
1970 	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
1971 		 hdev->rst_stats.vf_rst_cnt);
1972 	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
1973 		 hdev->rst_stats.rst_done_cnt);
1974 	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
1975 		 hdev->rst_stats.hw_rst_done_cnt);
1976 	dev_info(&hdev->pdev->dev, "reset count: %u\n",
1977 		 hdev->rst_stats.rst_cnt);
1978 	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
1979 		 hdev->rst_stats.rst_fail_cnt);
1980 	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
1981 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
1982 	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
1983 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
1984 	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
1985 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG));
1986 	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
1987 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
1988 	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
1989 }
1990 
1991 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
1992 {
1993 	/* recover handshake status with IMP when reset fail */
1994 	hclgevf_reset_handshake(hdev, true);
1995 	hdev->rst_stats.rst_fail_cnt++;
1996 	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
1997 		hdev->rst_stats.rst_fail_cnt);
1998 
1999 	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
2000 		set_bit(hdev->reset_type, &hdev->reset_pending);
2001 
2002 	if (hclgevf_is_reset_pending(hdev)) {
2003 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2004 		hclgevf_reset_task_schedule(hdev);
2005 	} else {
2006 		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
2007 		hclgevf_dump_rst_info(hdev);
2008 	}
2009 }
2010 
2011 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
2012 {
2013 	int ret;
2014 
2015 	hdev->rst_stats.rst_cnt++;
2016 
2017 	/* perform reset of the stack & ae device for a client */
2018 	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2019 	if (ret)
2020 		return ret;
2021 
2022 	rtnl_lock();
2023 	/* bring down the nic to stop any ongoing TX/RX */
2024 	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
2025 	rtnl_unlock();
2026 	if (ret)
2027 		return ret;
2028 
2029 	return hclgevf_reset_prepare_wait(hdev);
2030 }
2031 
2032 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
2033 {
2034 	int ret;
2035 
2036 	hdev->rst_stats.hw_rst_done_cnt++;
2037 	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2038 	if (ret)
2039 		return ret;
2040 
2041 	rtnl_lock();
2042 	/* now, re-initialize the nic client and ae device */
2043 	ret = hclgevf_reset_stack(hdev);
2044 	rtnl_unlock();
2045 	if (ret) {
2046 		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
2047 		return ret;
2048 	}
2049 
2050 	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2051 	/* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
2052 	 * times
2053 	 */
2054 	if (ret &&
2055 	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
2056 		return ret;
2057 
2058 	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2059 	if (ret)
2060 		return ret;
2061 
2062 	hdev->last_reset_time = jiffies;
2063 	hdev->rst_stats.rst_done_cnt++;
2064 	hdev->rst_stats.rst_fail_cnt = 0;
2065 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
2066 
2067 	return 0;
2068 }
2069 
2070 static void hclgevf_reset(struct hclgevf_dev *hdev)
2071 {
2072 	if (hclgevf_reset_prepare(hdev))
2073 		goto err_reset;
2074 
2075 	/* check if VF could successfully fetch the hardware reset completion
2076 	 * status from the hardware
2077 	 */
2078 	if (hclgevf_reset_wait(hdev)) {
2079 		/* can't do much in this situation, will disable VF */
2080 		dev_err(&hdev->pdev->dev,
2081 			"failed to fetch H/W reset completion status\n");
2082 		goto err_reset;
2083 	}
2084 
2085 	if (hclgevf_reset_rebuild(hdev))
2086 		goto err_reset;
2087 
2088 	return;
2089 
2090 err_reset:
2091 	hclgevf_reset_err_handle(hdev);
2092 }
2093 
2094 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
2095 						     unsigned long *addr)
2096 {
2097 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2098 
2099 	/* return the highest priority reset level amongst all */
2100 	if (test_bit(HNAE3_VF_RESET, addr)) {
2101 		rst_level = HNAE3_VF_RESET;
2102 		clear_bit(HNAE3_VF_RESET, addr);
2103 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
2104 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
2105 	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
2106 		rst_level = HNAE3_VF_FULL_RESET;
2107 		clear_bit(HNAE3_VF_FULL_RESET, addr);
2108 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
2109 	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
2110 		rst_level = HNAE3_VF_PF_FUNC_RESET;
2111 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
2112 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
2113 	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
2114 		rst_level = HNAE3_VF_FUNC_RESET;
2115 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
2116 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
2117 		rst_level = HNAE3_FLR_RESET;
2118 		clear_bit(HNAE3_FLR_RESET, addr);
2119 	}
2120 
2121 	return rst_level;
2122 }
2123 
2124 static void hclgevf_reset_event(struct pci_dev *pdev,
2125 				struct hnae3_handle *handle)
2126 {
2127 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2128 	struct hclgevf_dev *hdev = ae_dev->priv;
2129 
2130 	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
2131 
2132 	if (hdev->default_reset_request)
2133 		hdev->reset_level =
2134 			hclgevf_get_reset_level(hdev,
2135 						&hdev->default_reset_request);
2136 	else
2137 		hdev->reset_level = HNAE3_VF_FUNC_RESET;
2138 
2139 	/* reset of this VF requested */
2140 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
2141 	hclgevf_reset_task_schedule(hdev);
2142 
2143 	hdev->last_reset_time = jiffies;
2144 }
2145 
2146 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
2147 					  enum hnae3_reset_type rst_type)
2148 {
2149 	struct hclgevf_dev *hdev = ae_dev->priv;
2150 
2151 	set_bit(rst_type, &hdev->default_reset_request);
2152 }
2153 
2154 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
2155 {
2156 	writel(en ? 1 : 0, vector->addr);
2157 }
2158 
2159 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
2160 					  enum hnae3_reset_type rst_type)
2161 {
2162 #define HCLGEVF_RESET_RETRY_WAIT_MS	500
2163 #define HCLGEVF_RESET_RETRY_CNT		5
2164 
2165 	struct hclgevf_dev *hdev = ae_dev->priv;
2166 	int retry_cnt = 0;
2167 	int ret;
2168 
2169 	while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
2170 		down(&hdev->reset_sem);
2171 		set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2172 		hdev->reset_type = rst_type;
2173 		ret = hclgevf_reset_prepare(hdev);
2174 		if (!ret && !hdev->reset_pending)
2175 			break;
2176 
2177 		dev_err(&hdev->pdev->dev,
2178 			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
2179 			ret, hdev->reset_pending, retry_cnt);
2180 		clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2181 		up(&hdev->reset_sem);
2182 		msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
2183 	}
2184 
2185 	/* disable misc vector before reset done */
2186 	hclgevf_enable_vector(&hdev->misc_vector, false);
2187 
2188 	if (hdev->reset_type == HNAE3_FLR_RESET)
2189 		hdev->rst_stats.flr_rst_cnt++;
2190 }
2191 
2192 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
2193 {
2194 	struct hclgevf_dev *hdev = ae_dev->priv;
2195 	int ret;
2196 
2197 	hclgevf_enable_vector(&hdev->misc_vector, true);
2198 
2199 	ret = hclgevf_reset_rebuild(hdev);
2200 	if (ret)
2201 		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
2202 			 ret);
2203 
2204 	hdev->reset_type = HNAE3_NONE_RESET;
2205 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2206 	up(&hdev->reset_sem);
2207 }
2208 
2209 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
2210 {
2211 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2212 
2213 	return hdev->fw_version;
2214 }
2215 
2216 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
2217 {
2218 	struct hclgevf_misc_vector *vector = &hdev->misc_vector;
2219 
2220 	vector->vector_irq = pci_irq_vector(hdev->pdev,
2221 					    HCLGEVF_MISC_VECTOR_NUM);
2222 	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
2223 	/* vector status always valid for Vector 0 */
2224 	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
2225 	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
2226 
2227 	hdev->num_msi_left -= 1;
2228 	hdev->num_msi_used += 1;
2229 }
2230 
2231 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
2232 {
2233 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2234 	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
2235 	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
2236 			      &hdev->state))
2237 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
2238 }
2239 
2240 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
2241 {
2242 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2243 	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
2244 			      &hdev->state))
2245 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
2246 }
2247 
2248 static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
2249 				  unsigned long delay)
2250 {
2251 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2252 	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
2253 		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
2254 }
2255 
2256 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
2257 {
2258 #define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3
2259 
2260 	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
2261 		return;
2262 
2263 	down(&hdev->reset_sem);
2264 	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2265 
2266 	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
2267 			       &hdev->reset_state)) {
2268 		/* PF has intimated that it is about to reset the hardware.
2269 		 * We now have to poll & check if hardware has actually
2270 		 * completed the reset sequence. On hardware reset completion,
2271 		 * VF needs to reset the client and ae device.
2272 		 */
2273 		hdev->reset_attempts = 0;
2274 
2275 		hdev->last_reset_time = jiffies;
2276 		hdev->reset_type =
2277 			hclgevf_get_reset_level(hdev, &hdev->reset_pending);
2278 		if (hdev->reset_type != HNAE3_NONE_RESET)
2279 			hclgevf_reset(hdev);
2280 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
2281 				      &hdev->reset_state)) {
2282 		/* we could be here when either of below happens:
2283 		 * 1. reset was initiated due to watchdog timeout caused by
2284 		 *    a. IMP was earlier reset and our TX got choked down and
2285 		 *       which resulted in watchdog reacting and inducing VF
2286 		 *       reset. This also means our cmdq would be unreliable.
2287 		 *    b. problem in TX due to other lower layer(example link
2288 		 *       layer not functioning properly etc.)
2289 		 * 2. VF reset might have been initiated due to some config
2290 		 *    change.
2291 		 *
2292 		 * NOTE: Theres no clear way to detect above cases than to react
2293 		 * to the response of PF for this reset request. PF will ack the
2294 		 * 1b and 2. cases but we will not get any intimation about 1a
2295 		 * from PF as cmdq would be in unreliable state i.e. mailbox
2296 		 * communication between PF and VF would be broken.
2297 		 *
2298 		 * if we are never geting into pending state it means either:
2299 		 * 1. PF is not receiving our request which could be due to IMP
2300 		 *    reset
2301 		 * 2. PF is screwed
2302 		 * We cannot do much for 2. but to check first we can try reset
2303 		 * our PCIe + stack and see if it alleviates the problem.
2304 		 */
2305 		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
2306 			/* prepare for full reset of stack + pcie interface */
2307 			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
2308 
2309 			/* "defer" schedule the reset task again */
2310 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2311 		} else {
2312 			hdev->reset_attempts++;
2313 
2314 			set_bit(hdev->reset_level, &hdev->reset_pending);
2315 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2316 		}
2317 		hclgevf_reset_task_schedule(hdev);
2318 	}
2319 
2320 	hdev->reset_type = HNAE3_NONE_RESET;
2321 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2322 	up(&hdev->reset_sem);
2323 }
2324 
2325 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
2326 {
2327 	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
2328 		return;
2329 
2330 	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
2331 		return;
2332 
2333 	hclgevf_mbx_async_handler(hdev);
2334 
2335 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2336 }
2337 
2338 static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
2339 {
2340 	struct hclge_vf_to_pf_msg send_msg;
2341 	int ret;
2342 
2343 	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
2344 		return;
2345 
2346 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
2347 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2348 	if (ret)
2349 		dev_err(&hdev->pdev->dev,
2350 			"VF sends keep alive cmd failed(=%d)\n", ret);
2351 }
2352 
2353 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
2354 {
2355 	unsigned long delta = round_jiffies_relative(HZ);
2356 	struct hnae3_handle *handle = &hdev->nic;
2357 
2358 	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
2359 		return;
2360 
2361 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
2362 		delta = jiffies - hdev->last_serv_processed;
2363 
2364 		if (delta < round_jiffies_relative(HZ)) {
2365 			delta = round_jiffies_relative(HZ) - delta;
2366 			goto out;
2367 		}
2368 	}
2369 
2370 	hdev->serv_processed_cnt++;
2371 	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
2372 		hclgevf_keep_alive(hdev);
2373 
2374 	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
2375 		hdev->last_serv_processed = jiffies;
2376 		goto out;
2377 	}
2378 
2379 	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
2380 		hclgevf_tqps_update_stats(handle);
2381 
2382 	/* VF does not need to request link status when this bit is set, because
2383 	 * PF will push its link status to VFs when link status changed.
2384 	 */
2385 	if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
2386 		hclgevf_request_link_info(hdev);
2387 
2388 	hclgevf_update_link_mode(hdev);
2389 
2390 	hclgevf_sync_vlan_filter(hdev);
2391 
2392 	hclgevf_sync_mac_table(hdev);
2393 
2394 	hclgevf_sync_promisc_mode(hdev);
2395 
2396 	hdev->last_serv_processed = jiffies;
2397 
2398 out:
2399 	hclgevf_task_schedule(hdev, delta);
2400 }
2401 
2402 static void hclgevf_service_task(struct work_struct *work)
2403 {
2404 	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
2405 						service_task.work);
2406 
2407 	hclgevf_reset_service_task(hdev);
2408 	hclgevf_mailbox_service_task(hdev);
2409 	hclgevf_periodic_service_task(hdev);
2410 
2411 	/* Handle reset and mbx again in case periodical task delays the
2412 	 * handling by calling hclgevf_task_schedule() in
2413 	 * hclgevf_periodic_service_task()
2414 	 */
2415 	hclgevf_reset_service_task(hdev);
2416 	hclgevf_mailbox_service_task(hdev);
2417 }
2418 
2419 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
2420 {
2421 	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
2422 }
2423 
2424 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
2425 						      u32 *clearval)
2426 {
2427 	u32 val, cmdq_stat_reg, rst_ing_reg;
2428 
2429 	/* fetch the events from their corresponding regs */
2430 	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
2431 					 HCLGEVF_VECTOR0_CMDQ_STATE_REG);
2432 	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
2433 		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
2434 		dev_info(&hdev->pdev->dev,
2435 			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
2436 		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
2437 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2438 		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
2439 		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
2440 		hdev->rst_stats.vf_rst_cnt++;
2441 		/* set up VF hardware reset status, its PF will clear
2442 		 * this status when PF has initialized done.
2443 		 */
2444 		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
2445 		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
2446 				  val | HCLGEVF_VF_RST_ING_BIT);
2447 		return HCLGEVF_VECTOR0_EVENT_RST;
2448 	}
2449 
2450 	/* check for vector0 mailbox(=CMDQ RX) event source */
2451 	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
2452 		/* for revision 0x21, clearing interrupt is writing bit 0
2453 		 * to the clear register, writing bit 1 means to keep the
2454 		 * old value.
2455 		 * for revision 0x20, the clear register is a read & write
2456 		 * register, so we should just write 0 to the bit we are
2457 		 * handling, and keep other bits as cmdq_stat_reg.
2458 		 */
2459 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2460 			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2461 		else
2462 			*clearval = cmdq_stat_reg &
2463 				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2464 
2465 		return HCLGEVF_VECTOR0_EVENT_MBX;
2466 	}
2467 
2468 	/* print other vector0 event source */
2469 	dev_info(&hdev->pdev->dev,
2470 		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
2471 		 cmdq_stat_reg);
2472 
2473 	return HCLGEVF_VECTOR0_EVENT_OTHER;
2474 }
2475 
2476 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
2477 {
2478 	enum hclgevf_evt_cause event_cause;
2479 	struct hclgevf_dev *hdev = data;
2480 	u32 clearval;
2481 
2482 	hclgevf_enable_vector(&hdev->misc_vector, false);
2483 	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
2484 	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
2485 		hclgevf_clear_event_cause(hdev, clearval);
2486 
2487 	switch (event_cause) {
2488 	case HCLGEVF_VECTOR0_EVENT_RST:
2489 		hclgevf_reset_task_schedule(hdev);
2490 		break;
2491 	case HCLGEVF_VECTOR0_EVENT_MBX:
2492 		hclgevf_mbx_handler(hdev);
2493 		break;
2494 	default:
2495 		break;
2496 	}
2497 
2498 	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
2499 		hclgevf_enable_vector(&hdev->misc_vector, true);
2500 
2501 	return IRQ_HANDLED;
2502 }
2503 
2504 static int hclgevf_configure(struct hclgevf_dev *hdev)
2505 {
2506 	int ret;
2507 
2508 	hdev->gro_en = true;
2509 
2510 	ret = hclgevf_get_basic_info(hdev);
2511 	if (ret)
2512 		return ret;
2513 
2514 	/* get current port based vlan state from PF */
2515 	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
2516 	if (ret)
2517 		return ret;
2518 
2519 	/* get queue configuration from PF */
2520 	ret = hclgevf_get_queue_info(hdev);
2521 	if (ret)
2522 		return ret;
2523 
2524 	/* get queue depth info from PF */
2525 	ret = hclgevf_get_queue_depth(hdev);
2526 	if (ret)
2527 		return ret;
2528 
2529 	return hclgevf_get_pf_media_type(hdev);
2530 }
2531 
2532 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
2533 {
2534 	struct pci_dev *pdev = ae_dev->pdev;
2535 	struct hclgevf_dev *hdev;
2536 
2537 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
2538 	if (!hdev)
2539 		return -ENOMEM;
2540 
2541 	hdev->pdev = pdev;
2542 	hdev->ae_dev = ae_dev;
2543 	ae_dev->priv = hdev;
2544 
2545 	return 0;
2546 }
2547 
2548 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
2549 {
2550 	struct hnae3_handle *roce = &hdev->roce;
2551 	struct hnae3_handle *nic = &hdev->nic;
2552 
2553 	roce->rinfo.num_vectors = hdev->num_roce_msix;
2554 
2555 	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
2556 	    hdev->num_msi_left == 0)
2557 		return -EINVAL;
2558 
2559 	roce->rinfo.base_vector = hdev->roce_base_msix_offset;
2560 
2561 	roce->rinfo.netdev = nic->kinfo.netdev;
2562 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2563 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2564 
2565 	roce->pdev = nic->pdev;
2566 	roce->ae_algo = nic->ae_algo;
2567 	roce->numa_node_mask = nic->numa_node_mask;
2568 
2569 	return 0;
2570 }
2571 
2572 static int hclgevf_config_gro(struct hclgevf_dev *hdev)
2573 {
2574 	struct hclgevf_cfg_gro_status_cmd *req;
2575 	struct hclge_desc desc;
2576 	int ret;
2577 
2578 	if (!hnae3_dev_gro_supported(hdev))
2579 		return 0;
2580 
2581 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
2582 				     false);
2583 	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
2584 
2585 	req->gro_en = hdev->gro_en ? 1 : 0;
2586 
2587 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2588 	if (ret)
2589 		dev_err(&hdev->pdev->dev,
2590 			"VF GRO hardware config cmd failed, ret = %d.\n", ret);
2591 
2592 	return ret;
2593 }
2594 
2595 static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
2596 {
2597 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
2598 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
2599 	struct hclgevf_rss_tuple_cfg *tuple_sets;
2600 	u32 i;
2601 
2602 	rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
2603 	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
2604 	tuple_sets = &rss_cfg->rss_tuple_sets;
2605 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2606 		u8 *rss_ind_tbl;
2607 
2608 		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
2609 
2610 		rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
2611 					   sizeof(*rss_ind_tbl), GFP_KERNEL);
2612 		if (!rss_ind_tbl)
2613 			return -ENOMEM;
2614 
2615 		rss_cfg->rss_indirection_tbl = rss_ind_tbl;
2616 		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
2617 		       HCLGEVF_RSS_KEY_SIZE);
2618 
2619 		tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2620 		tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2621 		tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
2622 		tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2623 		tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2624 		tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2625 		tuple_sets->ipv6_sctp_en =
2626 			hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
2627 					HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
2628 					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
2629 		tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2630 	}
2631 
2632 	/* Initialize RSS indirect table */
2633 	for (i = 0; i < rss_ind_tbl_size; i++)
2634 		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
2635 
2636 	return 0;
2637 }
2638 
2639 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
2640 {
2641 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
2642 	int ret;
2643 
2644 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2645 		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
2646 					       rss_cfg->rss_hash_key);
2647 		if (ret)
2648 			return ret;
2649 
2650 		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
2651 		if (ret)
2652 			return ret;
2653 	}
2654 
2655 	ret = hclgevf_set_rss_indir_table(hdev);
2656 	if (ret)
2657 		return ret;
2658 
2659 	return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
2660 }
2661 
2662 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
2663 {
2664 	struct hnae3_handle *nic = &hdev->nic;
2665 	int ret;
2666 
2667 	ret = hclgevf_en_hw_strip_rxvtag(nic, true);
2668 	if (ret) {
2669 		dev_err(&hdev->pdev->dev,
2670 			"failed to enable rx vlan offload, ret = %d\n", ret);
2671 		return ret;
2672 	}
2673 
2674 	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
2675 				       false);
2676 }
2677 
2678 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
2679 {
2680 #define HCLGEVF_FLUSH_LINK_TIMEOUT	100000
2681 
2682 	unsigned long last = hdev->serv_processed_cnt;
2683 	int i = 0;
2684 
2685 	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
2686 	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
2687 	       last == hdev->serv_processed_cnt)
2688 		usleep_range(1, 1);
2689 }
2690 
2691 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
2692 {
2693 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2694 
2695 	if (enable) {
2696 		hclgevf_task_schedule(hdev, 0);
2697 	} else {
2698 		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2699 
2700 		/* flush memory to make sure DOWN is seen by service task */
2701 		smp_mb__before_atomic();
2702 		hclgevf_flush_link_update(hdev);
2703 	}
2704 }
2705 
2706 static int hclgevf_ae_start(struct hnae3_handle *handle)
2707 {
2708 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2709 
2710 	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2711 	clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
2712 
2713 	hclgevf_reset_tqp_stats(handle);
2714 
2715 	hclgevf_request_link_info(hdev);
2716 
2717 	hclgevf_update_link_mode(hdev);
2718 
2719 	return 0;
2720 }
2721 
2722 static void hclgevf_ae_stop(struct hnae3_handle *handle)
2723 {
2724 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2725 
2726 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2727 
2728 	if (hdev->reset_type != HNAE3_VF_RESET)
2729 		hclgevf_reset_tqp(handle);
2730 
2731 	hclgevf_reset_tqp_stats(handle);
2732 	hclgevf_update_link_status(hdev, 0);
2733 }
2734 
2735 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
2736 {
2737 #define HCLGEVF_STATE_ALIVE	1
2738 #define HCLGEVF_STATE_NOT_ALIVE	0
2739 
2740 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2741 	struct hclge_vf_to_pf_msg send_msg;
2742 
2743 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
2744 	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
2745 				HCLGEVF_STATE_NOT_ALIVE;
2746 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2747 }
2748 
2749 static int hclgevf_client_start(struct hnae3_handle *handle)
2750 {
2751 	return hclgevf_set_alive(handle, true);
2752 }
2753 
2754 static void hclgevf_client_stop(struct hnae3_handle *handle)
2755 {
2756 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2757 	int ret;
2758 
2759 	ret = hclgevf_set_alive(handle, false);
2760 	if (ret)
2761 		dev_warn(&hdev->pdev->dev,
2762 			 "%s failed %d\n", __func__, ret);
2763 }
2764 
2765 static void hclgevf_state_init(struct hclgevf_dev *hdev)
2766 {
2767 	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
2768 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2769 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
2770 
2771 	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
2772 
2773 	mutex_init(&hdev->mbx_resp.mbx_mutex);
2774 	sema_init(&hdev->reset_sem, 1);
2775 
2776 	spin_lock_init(&hdev->mac_table.mac_list_lock);
2777 	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
2778 	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
2779 
2780 	/* bring the device down */
2781 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2782 }
2783 
2784 static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
2785 {
2786 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2787 	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
2788 
2789 	if (hdev->service_task.work.func)
2790 		cancel_delayed_work_sync(&hdev->service_task);
2791 
2792 	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
2793 }
2794 
2795 static int hclgevf_init_msi(struct hclgevf_dev *hdev)
2796 {
2797 	struct pci_dev *pdev = hdev->pdev;
2798 	int vectors;
2799 	int i;
2800 
2801 	if (hnae3_dev_roce_supported(hdev))
2802 		vectors = pci_alloc_irq_vectors(pdev,
2803 						hdev->roce_base_msix_offset + 1,
2804 						hdev->num_msi,
2805 						PCI_IRQ_MSIX);
2806 	else
2807 		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2808 						hdev->num_msi,
2809 						PCI_IRQ_MSI | PCI_IRQ_MSIX);
2810 
2811 	if (vectors < 0) {
2812 		dev_err(&pdev->dev,
2813 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2814 			vectors);
2815 		return vectors;
2816 	}
2817 	if (vectors < hdev->num_msi)
2818 		dev_warn(&hdev->pdev->dev,
2819 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2820 			 hdev->num_msi, vectors);
2821 
2822 	hdev->num_msi = vectors;
2823 	hdev->num_msi_left = vectors;
2824 
2825 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2826 					   sizeof(u16), GFP_KERNEL);
2827 	if (!hdev->vector_status) {
2828 		pci_free_irq_vectors(pdev);
2829 		return -ENOMEM;
2830 	}
2831 
2832 	for (i = 0; i < hdev->num_msi; i++)
2833 		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
2834 
2835 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2836 					sizeof(int), GFP_KERNEL);
2837 	if (!hdev->vector_irq) {
2838 		devm_kfree(&pdev->dev, hdev->vector_status);
2839 		pci_free_irq_vectors(pdev);
2840 		return -ENOMEM;
2841 	}
2842 
2843 	return 0;
2844 }
2845 
2846 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
2847 {
2848 	struct pci_dev *pdev = hdev->pdev;
2849 
2850 	devm_kfree(&pdev->dev, hdev->vector_status);
2851 	devm_kfree(&pdev->dev, hdev->vector_irq);
2852 	pci_free_irq_vectors(pdev);
2853 }
2854 
2855 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
2856 {
2857 	int ret;
2858 
2859 	hclgevf_get_misc_vector(hdev);
2860 
2861 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
2862 		 HCLGEVF_NAME, pci_name(hdev->pdev));
2863 	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
2864 			  0, hdev->misc_vector.name, hdev);
2865 	if (ret) {
2866 		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
2867 			hdev->misc_vector.vector_irq);
2868 		return ret;
2869 	}
2870 
2871 	hclgevf_clear_event_cause(hdev, 0);
2872 
2873 	/* enable misc. vector(vector 0) */
2874 	hclgevf_enable_vector(&hdev->misc_vector, true);
2875 
2876 	return ret;
2877 }
2878 
2879 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
2880 {
2881 	/* disable misc vector(vector 0) */
2882 	hclgevf_enable_vector(&hdev->misc_vector, false);
2883 	synchronize_irq(hdev->misc_vector.vector_irq);
2884 	free_irq(hdev->misc_vector.vector_irq, hdev);
2885 	hclgevf_free_vector(hdev, 0);
2886 }
2887 
2888 static void hclgevf_info_show(struct hclgevf_dev *hdev)
2889 {
2890 	struct device *dev = &hdev->pdev->dev;
2891 
2892 	dev_info(dev, "VF info begin:\n");
2893 
2894 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
2895 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
2896 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
2897 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
2898 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
2899 	dev_info(dev, "PF media type of this VF: %u\n",
2900 		 hdev->hw.mac.media_type);
2901 
2902 	dev_info(dev, "VF info end.\n");
2903 }
2904 
2905 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
2906 					    struct hnae3_client *client)
2907 {
2908 	struct hclgevf_dev *hdev = ae_dev->priv;
2909 	int rst_cnt = hdev->rst_stats.rst_cnt;
2910 	int ret;
2911 
2912 	ret = client->ops->init_instance(&hdev->nic);
2913 	if (ret)
2914 		return ret;
2915 
2916 	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2917 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
2918 	    rst_cnt != hdev->rst_stats.rst_cnt) {
2919 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2920 
2921 		client->ops->uninit_instance(&hdev->nic, 0);
2922 		return -EBUSY;
2923 	}
2924 
2925 	hnae3_set_client_init_flag(client, ae_dev, 1);
2926 
2927 	if (netif_msg_drv(&hdev->nic))
2928 		hclgevf_info_show(hdev);
2929 
2930 	return 0;
2931 }
2932 
2933 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
2934 					     struct hnae3_client *client)
2935 {
2936 	struct hclgevf_dev *hdev = ae_dev->priv;
2937 	int ret;
2938 
2939 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
2940 	    !hdev->nic_client)
2941 		return 0;
2942 
2943 	ret = hclgevf_init_roce_base_info(hdev);
2944 	if (ret)
2945 		return ret;
2946 
2947 	ret = client->ops->init_instance(&hdev->roce);
2948 	if (ret)
2949 		return ret;
2950 
2951 	set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2952 	hnae3_set_client_init_flag(client, ae_dev, 1);
2953 
2954 	return 0;
2955 }
2956 
2957 static int hclgevf_init_client_instance(struct hnae3_client *client,
2958 					struct hnae3_ae_dev *ae_dev)
2959 {
2960 	struct hclgevf_dev *hdev = ae_dev->priv;
2961 	int ret;
2962 
2963 	switch (client->type) {
2964 	case HNAE3_CLIENT_KNIC:
2965 		hdev->nic_client = client;
2966 		hdev->nic.client = client;
2967 
2968 		ret = hclgevf_init_nic_client_instance(ae_dev, client);
2969 		if (ret)
2970 			goto clear_nic;
2971 
2972 		ret = hclgevf_init_roce_client_instance(ae_dev,
2973 							hdev->roce_client);
2974 		if (ret)
2975 			goto clear_roce;
2976 
2977 		break;
2978 	case HNAE3_CLIENT_ROCE:
2979 		if (hnae3_dev_roce_supported(hdev)) {
2980 			hdev->roce_client = client;
2981 			hdev->roce.client = client;
2982 		}
2983 
2984 		ret = hclgevf_init_roce_client_instance(ae_dev, client);
2985 		if (ret)
2986 			goto clear_roce;
2987 
2988 		break;
2989 	default:
2990 		return -EINVAL;
2991 	}
2992 
2993 	return 0;
2994 
2995 clear_nic:
2996 	hdev->nic_client = NULL;
2997 	hdev->nic.client = NULL;
2998 	return ret;
2999 clear_roce:
3000 	hdev->roce_client = NULL;
3001 	hdev->roce.client = NULL;
3002 	return ret;
3003 }
3004 
3005 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
3006 					   struct hnae3_ae_dev *ae_dev)
3007 {
3008 	struct hclgevf_dev *hdev = ae_dev->priv;
3009 
3010 	/* un-init roce, if it exists */
3011 	if (hdev->roce_client) {
3012 		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
3013 			msleep(HCLGEVF_WAIT_RESET_DONE);
3014 		clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
3015 
3016 		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
3017 		hdev->roce_client = NULL;
3018 		hdev->roce.client = NULL;
3019 	}
3020 
3021 	/* un-init nic/unic, if this was not called by roce client */
3022 	if (client->ops->uninit_instance && hdev->nic_client &&
3023 	    client->type != HNAE3_CLIENT_ROCE) {
3024 		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
3025 			msleep(HCLGEVF_WAIT_RESET_DONE);
3026 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
3027 
3028 		client->ops->uninit_instance(&hdev->nic, 0);
3029 		hdev->nic_client = NULL;
3030 		hdev->nic.client = NULL;
3031 	}
3032 }
3033 
3034 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
3035 {
3036 #define HCLGEVF_MEM_BAR		4
3037 
3038 	struct pci_dev *pdev = hdev->pdev;
3039 	struct hclgevf_hw *hw = &hdev->hw;
3040 
3041 	/* for device does not have device memory, return directly */
3042 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
3043 		return 0;
3044 
3045 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
3046 				       pci_resource_start(pdev,
3047 							  HCLGEVF_MEM_BAR),
3048 				       pci_resource_len(pdev, HCLGEVF_MEM_BAR));
3049 	if (!hw->mem_base) {
3050 		dev_err(&pdev->dev, "failed to map device memory\n");
3051 		return -EFAULT;
3052 	}
3053 
3054 	return 0;
3055 }
3056 
3057 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
3058 {
3059 	struct pci_dev *pdev = hdev->pdev;
3060 	struct hclgevf_hw *hw;
3061 	int ret;
3062 
3063 	ret = pci_enable_device(pdev);
3064 	if (ret) {
3065 		dev_err(&pdev->dev, "failed to enable PCI device\n");
3066 		return ret;
3067 	}
3068 
3069 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3070 	if (ret) {
3071 		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
3072 		goto err_disable_device;
3073 	}
3074 
3075 	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
3076 	if (ret) {
3077 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
3078 		goto err_disable_device;
3079 	}
3080 
3081 	pci_set_master(pdev);
3082 	hw = &hdev->hw;
3083 	hw->hdev = hdev;
3084 	hw->io_base = pci_iomap(pdev, 2, 0);
3085 	if (!hw->io_base) {
3086 		dev_err(&pdev->dev, "can't map configuration register space\n");
3087 		ret = -ENOMEM;
3088 		goto err_clr_master;
3089 	}
3090 
3091 	ret = hclgevf_dev_mem_map(hdev);
3092 	if (ret)
3093 		goto err_unmap_io_base;
3094 
3095 	return 0;
3096 
3097 err_unmap_io_base:
3098 	pci_iounmap(pdev, hdev->hw.io_base);
3099 err_clr_master:
3100 	pci_clear_master(pdev);
3101 	pci_release_regions(pdev);
3102 err_disable_device:
3103 	pci_disable_device(pdev);
3104 
3105 	return ret;
3106 }
3107 
3108 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
3109 {
3110 	struct pci_dev *pdev = hdev->pdev;
3111 
3112 	if (hdev->hw.mem_base)
3113 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
3114 
3115 	pci_iounmap(pdev, hdev->hw.io_base);
3116 	pci_clear_master(pdev);
3117 	pci_release_regions(pdev);
3118 	pci_disable_device(pdev);
3119 }
3120 
3121 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
3122 {
3123 	struct hclgevf_query_res_cmd *req;
3124 	struct hclge_desc desc;
3125 	int ret;
3126 
3127 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
3128 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
3129 	if (ret) {
3130 		dev_err(&hdev->pdev->dev,
3131 			"query vf resource failed, ret = %d.\n", ret);
3132 		return ret;
3133 	}
3134 
3135 	req = (struct hclgevf_query_res_cmd *)desc.data;
3136 
3137 	if (hnae3_dev_roce_supported(hdev)) {
3138 		hdev->roce_base_msix_offset =
3139 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
3140 				HCLGEVF_MSIX_OFT_ROCEE_M,
3141 				HCLGEVF_MSIX_OFT_ROCEE_S);
3142 		hdev->num_roce_msix =
3143 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
3144 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
3145 
3146 		/* nic's msix numbers is always equals to the roce's. */
3147 		hdev->num_nic_msix = hdev->num_roce_msix;
3148 
3149 		/* VF should have NIC vectors and Roce vectors, NIC vectors
3150 		 * are queued before Roce vectors. The offset is fixed to 64.
3151 		 */
3152 		hdev->num_msi = hdev->num_roce_msix +
3153 				hdev->roce_base_msix_offset;
3154 	} else {
3155 		hdev->num_msi =
3156 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
3157 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
3158 
3159 		hdev->num_nic_msix = hdev->num_msi;
3160 	}
3161 
3162 	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
3163 		dev_err(&hdev->pdev->dev,
3164 			"Just %u msi resources, not enough for vf(min:2).\n",
3165 			hdev->num_nic_msix);
3166 		return -EINVAL;
3167 	}
3168 
3169 	return 0;
3170 }
3171 
3172 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
3173 {
3174 #define HCLGEVF_MAX_NON_TSO_BD_NUM			8U
3175 
3176 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3177 
3178 	ae_dev->dev_specs.max_non_tso_bd_num =
3179 					HCLGEVF_MAX_NON_TSO_BD_NUM;
3180 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
3181 	ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
3182 	ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
3183 	ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
3184 }
3185 
3186 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
3187 				    struct hclge_desc *desc)
3188 {
3189 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3190 	struct hclgevf_dev_specs_0_cmd *req0;
3191 	struct hclgevf_dev_specs_1_cmd *req1;
3192 
3193 	req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
3194 	req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
3195 
3196 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
3197 	ae_dev->dev_specs.rss_ind_tbl_size =
3198 					le16_to_cpu(req0->rss_ind_tbl_size);
3199 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
3200 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
3201 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
3202 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
3203 }
3204 
3205 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
3206 {
3207 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
3208 
3209 	if (!dev_specs->max_non_tso_bd_num)
3210 		dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
3211 	if (!dev_specs->rss_ind_tbl_size)
3212 		dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
3213 	if (!dev_specs->rss_key_size)
3214 		dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
3215 	if (!dev_specs->max_int_gl)
3216 		dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
3217 	if (!dev_specs->max_frm_size)
3218 		dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
3219 }
3220 
3221 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
3222 {
3223 	struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
3224 	int ret;
3225 	int i;
3226 
3227 	/* set default specifications as devices lower than version V3 do not
3228 	 * support querying specifications from firmware.
3229 	 */
3230 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
3231 		hclgevf_set_default_dev_specs(hdev);
3232 		return 0;
3233 	}
3234 
3235 	for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
3236 		hclgevf_cmd_setup_basic_desc(&desc[i],
3237 					     HCLGEVF_OPC_QUERY_DEV_SPECS, true);
3238 		desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
3239 	}
3240 	hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
3241 				     true);
3242 
3243 	ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
3244 	if (ret)
3245 		return ret;
3246 
3247 	hclgevf_parse_dev_specs(hdev, desc);
3248 	hclgevf_check_dev_specs(hdev);
3249 
3250 	return 0;
3251 }
3252 
3253 static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
3254 {
3255 	struct pci_dev *pdev = hdev->pdev;
3256 	int ret = 0;
3257 
3258 	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
3259 	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3260 		hclgevf_misc_irq_uninit(hdev);
3261 		hclgevf_uninit_msi(hdev);
3262 		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3263 	}
3264 
3265 	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3266 		pci_set_master(pdev);
3267 		ret = hclgevf_init_msi(hdev);
3268 		if (ret) {
3269 			dev_err(&pdev->dev,
3270 				"failed(%d) to init MSI/MSI-X\n", ret);
3271 			return ret;
3272 		}
3273 
3274 		ret = hclgevf_misc_irq_init(hdev);
3275 		if (ret) {
3276 			hclgevf_uninit_msi(hdev);
3277 			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
3278 				ret);
3279 			return ret;
3280 		}
3281 
3282 		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3283 	}
3284 
3285 	return ret;
3286 }
3287 
3288 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
3289 {
3290 	struct hclge_vf_to_pf_msg send_msg;
3291 
3292 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
3293 			       HCLGE_MBX_VPORT_LIST_CLEAR);
3294 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3295 }
3296 
3297 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
3298 {
3299 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
3300 		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
3301 }
3302 
3303 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
3304 {
3305 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
3306 		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
3307 }
3308 
3309 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
3310 {
3311 	struct pci_dev *pdev = hdev->pdev;
3312 	int ret;
3313 
3314 	ret = hclgevf_pci_reset(hdev);
3315 	if (ret) {
3316 		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
3317 		return ret;
3318 	}
3319 
3320 	ret = hclgevf_cmd_init(hdev);
3321 	if (ret) {
3322 		dev_err(&pdev->dev, "cmd failed %d\n", ret);
3323 		return ret;
3324 	}
3325 
3326 	ret = hclgevf_rss_init_hw(hdev);
3327 	if (ret) {
3328 		dev_err(&hdev->pdev->dev,
3329 			"failed(%d) to initialize RSS\n", ret);
3330 		return ret;
3331 	}
3332 
3333 	ret = hclgevf_config_gro(hdev);
3334 	if (ret)
3335 		return ret;
3336 
3337 	ret = hclgevf_init_vlan_config(hdev);
3338 	if (ret) {
3339 		dev_err(&hdev->pdev->dev,
3340 			"failed(%d) to initialize VLAN config\n", ret);
3341 		return ret;
3342 	}
3343 
3344 	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
3345 
3346 	hclgevf_init_rxd_adv_layout(hdev);
3347 
3348 	dev_info(&hdev->pdev->dev, "Reset done\n");
3349 
3350 	return 0;
3351 }
3352 
3353 static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
3354 {
3355 	struct pci_dev *pdev = hdev->pdev;
3356 	int ret;
3357 
3358 	ret = hclgevf_pci_init(hdev);
3359 	if (ret)
3360 		return ret;
3361 
3362 	ret = hclgevf_devlink_init(hdev);
3363 	if (ret)
3364 		goto err_devlink_init;
3365 
3366 	ret = hclgevf_cmd_queue_init(hdev);
3367 	if (ret)
3368 		goto err_cmd_queue_init;
3369 
3370 	ret = hclgevf_cmd_init(hdev);
3371 	if (ret)
3372 		goto err_cmd_init;
3373 
3374 	/* Get vf resource */
3375 	ret = hclgevf_query_vf_resource(hdev);
3376 	if (ret)
3377 		goto err_cmd_init;
3378 
3379 	ret = hclgevf_query_dev_specs(hdev);
3380 	if (ret) {
3381 		dev_err(&pdev->dev,
3382 			"failed to query dev specifications, ret = %d\n", ret);
3383 		goto err_cmd_init;
3384 	}
3385 
3386 	ret = hclgevf_init_msi(hdev);
3387 	if (ret) {
3388 		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
3389 		goto err_cmd_init;
3390 	}
3391 
3392 	hclgevf_state_init(hdev);
3393 	hdev->reset_level = HNAE3_VF_FUNC_RESET;
3394 	hdev->reset_type = HNAE3_NONE_RESET;
3395 
3396 	ret = hclgevf_misc_irq_init(hdev);
3397 	if (ret)
3398 		goto err_misc_irq_init;
3399 
3400 	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3401 
3402 	ret = hclgevf_configure(hdev);
3403 	if (ret) {
3404 		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
3405 		goto err_config;
3406 	}
3407 
3408 	ret = hclgevf_alloc_tqps(hdev);
3409 	if (ret) {
3410 		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
3411 		goto err_config;
3412 	}
3413 
3414 	ret = hclgevf_set_handle_info(hdev);
3415 	if (ret)
3416 		goto err_config;
3417 
3418 	ret = hclgevf_config_gro(hdev);
3419 	if (ret)
3420 		goto err_config;
3421 
3422 	/* Initialize RSS for this VF */
3423 	ret = hclgevf_rss_init_cfg(hdev);
3424 	if (ret) {
3425 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
3426 		goto err_config;
3427 	}
3428 
3429 	ret = hclgevf_rss_init_hw(hdev);
3430 	if (ret) {
3431 		dev_err(&hdev->pdev->dev,
3432 			"failed(%d) to initialize RSS\n", ret);
3433 		goto err_config;
3434 	}
3435 
3436 	/* ensure vf tbl list as empty before init*/
3437 	ret = hclgevf_clear_vport_list(hdev);
3438 	if (ret) {
3439 		dev_err(&pdev->dev,
3440 			"failed to clear tbl list configuration, ret = %d.\n",
3441 			ret);
3442 		goto err_config;
3443 	}
3444 
3445 	ret = hclgevf_init_vlan_config(hdev);
3446 	if (ret) {
3447 		dev_err(&hdev->pdev->dev,
3448 			"failed(%d) to initialize VLAN config\n", ret);
3449 		goto err_config;
3450 	}
3451 
3452 	hclgevf_init_rxd_adv_layout(hdev);
3453 
3454 	set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
3455 
3456 	hdev->last_reset_time = jiffies;
3457 	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
3458 		 HCLGEVF_DRIVER_NAME);
3459 
3460 	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
3461 
3462 	return 0;
3463 
3464 err_config:
3465 	hclgevf_misc_irq_uninit(hdev);
3466 err_misc_irq_init:
3467 	hclgevf_state_uninit(hdev);
3468 	hclgevf_uninit_msi(hdev);
3469 err_cmd_init:
3470 	hclgevf_cmd_uninit(hdev);
3471 err_cmd_queue_init:
3472 	hclgevf_devlink_uninit(hdev);
3473 err_devlink_init:
3474 	hclgevf_pci_uninit(hdev);
3475 	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3476 	return ret;
3477 }
3478 
3479 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
3480 {
3481 	struct hclge_vf_to_pf_msg send_msg;
3482 
3483 	hclgevf_state_uninit(hdev);
3484 	hclgevf_uninit_rxd_adv_layout(hdev);
3485 
3486 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
3487 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3488 
3489 	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3490 		hclgevf_misc_irq_uninit(hdev);
3491 		hclgevf_uninit_msi(hdev);
3492 	}
3493 
3494 	hclgevf_cmd_uninit(hdev);
3495 	hclgevf_devlink_uninit(hdev);
3496 	hclgevf_pci_uninit(hdev);
3497 	hclgevf_uninit_mac_list(hdev);
3498 }
3499 
3500 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
3501 {
3502 	struct pci_dev *pdev = ae_dev->pdev;
3503 	int ret;
3504 
3505 	ret = hclgevf_alloc_hdev(ae_dev);
3506 	if (ret) {
3507 		dev_err(&pdev->dev, "hclge device allocation failed\n");
3508 		return ret;
3509 	}
3510 
3511 	ret = hclgevf_init_hdev(ae_dev->priv);
3512 	if (ret) {
3513 		dev_err(&pdev->dev, "hclge device initialization failed\n");
3514 		return ret;
3515 	}
3516 
3517 	return 0;
3518 }
3519 
3520 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
3521 {
3522 	struct hclgevf_dev *hdev = ae_dev->priv;
3523 
3524 	hclgevf_uninit_hdev(hdev);
3525 	ae_dev->priv = NULL;
3526 }
3527 
3528 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
3529 {
3530 	struct hnae3_handle *nic = &hdev->nic;
3531 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
3532 
3533 	return min_t(u32, hdev->rss_size_max,
3534 		     hdev->num_tqps / kinfo->tc_info.num_tc);
3535 }
3536 
3537 /**
3538  * hclgevf_get_channels - Get the current channels enabled and max supported.
3539  * @handle: hardware information for network interface
3540  * @ch: ethtool channels structure
3541  *
3542  * We don't support separate tx and rx queues as channels. The other count
3543  * represents how many queues are being used for control. max_combined counts
3544  * how many queue pairs we can support. They may not be mapped 1 to 1 with
3545  * q_vectors since we support a lot more queue pairs than q_vectors.
3546  **/
3547 static void hclgevf_get_channels(struct hnae3_handle *handle,
3548 				 struct ethtool_channels *ch)
3549 {
3550 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3551 
3552 	ch->max_combined = hclgevf_get_max_channels(hdev);
3553 	ch->other_count = 0;
3554 	ch->max_other = 0;
3555 	ch->combined_count = handle->kinfo.rss_size;
3556 }
3557 
3558 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
3559 					  u16 *alloc_tqps, u16 *max_rss_size)
3560 {
3561 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3562 
3563 	*alloc_tqps = hdev->num_tqps;
3564 	*max_rss_size = hdev->rss_size_max;
3565 }
3566 
3567 static void hclgevf_update_rss_size(struct hnae3_handle *handle,
3568 				    u32 new_tqps_num)
3569 {
3570 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3571 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3572 	u16 max_rss_size;
3573 
3574 	kinfo->req_rss_size = new_tqps_num;
3575 
3576 	max_rss_size = min_t(u16, hdev->rss_size_max,
3577 			     hdev->num_tqps / kinfo->tc_info.num_tc);
3578 
3579 	/* Use the user's configuration when it is not larger than
3580 	 * max_rss_size, otherwise, use the maximum specification value.
3581 	 */
3582 	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
3583 	    kinfo->req_rss_size <= max_rss_size)
3584 		kinfo->rss_size = kinfo->req_rss_size;
3585 	else if (kinfo->rss_size > max_rss_size ||
3586 		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
3587 		kinfo->rss_size = max_rss_size;
3588 
3589 	kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
3590 }
3591 
3592 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
3593 				bool rxfh_configured)
3594 {
3595 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3596 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3597 	u16 cur_rss_size = kinfo->rss_size;
3598 	u16 cur_tqps = kinfo->num_tqps;
3599 	u32 *rss_indir;
3600 	unsigned int i;
3601 	int ret;
3602 
3603 	hclgevf_update_rss_size(handle, new_tqps_num);
3604 
3605 	ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
3606 	if (ret)
3607 		return ret;
3608 
3609 	/* RSS indirection table has been configured by user */
3610 	if (rxfh_configured)
3611 		goto out;
3612 
3613 	/* Reinitializes the rss indirect table according to the new RSS size */
3614 	rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
3615 			    sizeof(u32), GFP_KERNEL);
3616 	if (!rss_indir)
3617 		return -ENOMEM;
3618 
3619 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
3620 		rss_indir[i] = i % kinfo->rss_size;
3621 
3622 	hdev->rss_cfg.rss_size = kinfo->rss_size;
3623 
3624 	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
3625 	if (ret)
3626 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
3627 			ret);
3628 
3629 	kfree(rss_indir);
3630 
3631 out:
3632 	if (!ret)
3633 		dev_info(&hdev->pdev->dev,
3634 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
3635 			 cur_rss_size, kinfo->rss_size,
3636 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
3637 
3638 	return ret;
3639 }
3640 
3641 static int hclgevf_get_status(struct hnae3_handle *handle)
3642 {
3643 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3644 
3645 	return hdev->hw.mac.link;
3646 }
3647 
3648 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
3649 					    u8 *auto_neg, u32 *speed,
3650 					    u8 *duplex)
3651 {
3652 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3653 
3654 	if (speed)
3655 		*speed = hdev->hw.mac.speed;
3656 	if (duplex)
3657 		*duplex = hdev->hw.mac.duplex;
3658 	if (auto_neg)
3659 		*auto_neg = AUTONEG_DISABLE;
3660 }
3661 
3662 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
3663 				 u8 duplex)
3664 {
3665 	hdev->hw.mac.speed = speed;
3666 	hdev->hw.mac.duplex = duplex;
3667 }
3668 
3669 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
3670 {
3671 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3672 	bool gro_en_old = hdev->gro_en;
3673 	int ret;
3674 
3675 	hdev->gro_en = enable;
3676 	ret = hclgevf_config_gro(hdev);
3677 	if (ret)
3678 		hdev->gro_en = gro_en_old;
3679 
3680 	return ret;
3681 }
3682 
3683 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
3684 				   u8 *module_type)
3685 {
3686 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3687 
3688 	if (media_type)
3689 		*media_type = hdev->hw.mac.media_type;
3690 
3691 	if (module_type)
3692 		*module_type = hdev->hw.mac.module_type;
3693 }
3694 
3695 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
3696 {
3697 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3698 
3699 	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
3700 }
3701 
3702 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
3703 {
3704 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3705 
3706 	return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
3707 }
3708 
3709 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
3710 {
3711 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3712 
3713 	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
3714 }
3715 
3716 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
3717 {
3718 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3719 
3720 	return hdev->rst_stats.hw_rst_done_cnt;
3721 }
3722 
3723 static void hclgevf_get_link_mode(struct hnae3_handle *handle,
3724 				  unsigned long *supported,
3725 				  unsigned long *advertising)
3726 {
3727 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3728 
3729 	*supported = hdev->hw.mac.supported;
3730 	*advertising = hdev->hw.mac.advertising;
3731 }
3732 
3733 #define MAX_SEPARATE_NUM	4
3734 #define SEPARATOR_VALUE		0xFDFCFBFA
3735 #define REG_NUM_PER_LINE	4
3736 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
3737 
3738 static int hclgevf_get_regs_len(struct hnae3_handle *handle)
3739 {
3740 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
3741 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3742 
3743 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
3744 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
3745 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
3746 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
3747 
3748 	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
3749 		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
3750 }
3751 
3752 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
3753 			     void *data)
3754 {
3755 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3756 	int i, j, reg_um, separator_num;
3757 	u32 *reg = data;
3758 
3759 	*version = hdev->fw_version;
3760 
3761 	/* fetching per-VF registers values from VF PCIe register space */
3762 	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
3763 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3764 	for (i = 0; i < reg_um; i++)
3765 		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
3766 	for (i = 0; i < separator_num; i++)
3767 		*reg++ = SEPARATOR_VALUE;
3768 
3769 	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
3770 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3771 	for (i = 0; i < reg_um; i++)
3772 		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
3773 	for (i = 0; i < separator_num; i++)
3774 		*reg++ = SEPARATOR_VALUE;
3775 
3776 	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
3777 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3778 	for (j = 0; j < hdev->num_tqps; j++) {
3779 		for (i = 0; i < reg_um; i++)
3780 			*reg++ = hclgevf_read_dev(&hdev->hw,
3781 						  ring_reg_addr_list[i] +
3782 						  0x200 * j);
3783 		for (i = 0; i < separator_num; i++)
3784 			*reg++ = SEPARATOR_VALUE;
3785 	}
3786 
3787 	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
3788 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3789 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
3790 		for (i = 0; i < reg_um; i++)
3791 			*reg++ = hclgevf_read_dev(&hdev->hw,
3792 						  tqp_intr_reg_addr_list[i] +
3793 						  4 * j);
3794 		for (i = 0; i < separator_num; i++)
3795 			*reg++ = SEPARATOR_VALUE;
3796 	}
3797 }
3798 
3799 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
3800 					u8 *port_base_vlan_info, u8 data_size)
3801 {
3802 	struct hnae3_handle *nic = &hdev->nic;
3803 	struct hclge_vf_to_pf_msg send_msg;
3804 	int ret;
3805 
3806 	rtnl_lock();
3807 
3808 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
3809 	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
3810 		dev_warn(&hdev->pdev->dev,
3811 			 "is resetting when updating port based vlan info\n");
3812 		rtnl_unlock();
3813 		return;
3814 	}
3815 
3816 	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
3817 	if (ret) {
3818 		rtnl_unlock();
3819 		return;
3820 	}
3821 
3822 	/* send msg to PF and wait update port based vlan info */
3823 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
3824 			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
3825 	memcpy(send_msg.data, port_base_vlan_info, data_size);
3826 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3827 	if (!ret) {
3828 		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
3829 			nic->port_base_vlan_state = state;
3830 		else
3831 			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
3832 	}
3833 
3834 	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
3835 	rtnl_unlock();
3836 }
3837 
3838 static const struct hnae3_ae_ops hclgevf_ops = {
3839 	.init_ae_dev = hclgevf_init_ae_dev,
3840 	.uninit_ae_dev = hclgevf_uninit_ae_dev,
3841 	.reset_prepare = hclgevf_reset_prepare_general,
3842 	.reset_done = hclgevf_reset_done,
3843 	.init_client_instance = hclgevf_init_client_instance,
3844 	.uninit_client_instance = hclgevf_uninit_client_instance,
3845 	.start = hclgevf_ae_start,
3846 	.stop = hclgevf_ae_stop,
3847 	.client_start = hclgevf_client_start,
3848 	.client_stop = hclgevf_client_stop,
3849 	.map_ring_to_vector = hclgevf_map_ring_to_vector,
3850 	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
3851 	.get_vector = hclgevf_get_vector,
3852 	.put_vector = hclgevf_put_vector,
3853 	.reset_queue = hclgevf_reset_tqp,
3854 	.get_mac_addr = hclgevf_get_mac_addr,
3855 	.set_mac_addr = hclgevf_set_mac_addr,
3856 	.add_uc_addr = hclgevf_add_uc_addr,
3857 	.rm_uc_addr = hclgevf_rm_uc_addr,
3858 	.add_mc_addr = hclgevf_add_mc_addr,
3859 	.rm_mc_addr = hclgevf_rm_mc_addr,
3860 	.get_stats = hclgevf_get_stats,
3861 	.update_stats = hclgevf_update_stats,
3862 	.get_strings = hclgevf_get_strings,
3863 	.get_sset_count = hclgevf_get_sset_count,
3864 	.get_rss_key_size = hclgevf_get_rss_key_size,
3865 	.get_rss = hclgevf_get_rss,
3866 	.set_rss = hclgevf_set_rss,
3867 	.get_rss_tuple = hclgevf_get_rss_tuple,
3868 	.set_rss_tuple = hclgevf_set_rss_tuple,
3869 	.get_tc_size = hclgevf_get_tc_size,
3870 	.get_fw_version = hclgevf_get_fw_version,
3871 	.set_vlan_filter = hclgevf_set_vlan_filter,
3872 	.enable_vlan_filter = hclgevf_enable_vlan_filter,
3873 	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
3874 	.reset_event = hclgevf_reset_event,
3875 	.set_default_reset_request = hclgevf_set_def_reset_request,
3876 	.set_channels = hclgevf_set_channels,
3877 	.get_channels = hclgevf_get_channels,
3878 	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
3879 	.get_regs_len = hclgevf_get_regs_len,
3880 	.get_regs = hclgevf_get_regs,
3881 	.get_status = hclgevf_get_status,
3882 	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
3883 	.get_media_type = hclgevf_get_media_type,
3884 	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
3885 	.ae_dev_resetting = hclgevf_ae_dev_resetting,
3886 	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
3887 	.set_gro_en = hclgevf_gro_en,
3888 	.set_mtu = hclgevf_set_mtu,
3889 	.get_global_queue_id = hclgevf_get_qid_global,
3890 	.set_timer_task = hclgevf_set_timer_task,
3891 	.get_link_mode = hclgevf_get_link_mode,
3892 	.set_promisc_mode = hclgevf_set_promisc_mode,
3893 	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
3894 	.get_cmdq_stat = hclgevf_get_cmdq_stat,
3895 };
3896 
3897 static struct hnae3_ae_algo ae_algovf = {
3898 	.ops = &hclgevf_ops,
3899 	.pdev_id_table = ae_algovf_pci_tbl,
3900 };
3901 
3902 static int hclgevf_init(void)
3903 {
3904 	pr_info("%s is initializing\n", HCLGEVF_NAME);
3905 
3906 	hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
3907 	if (!hclgevf_wq) {
3908 		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
3909 		return -ENOMEM;
3910 	}
3911 
3912 	hnae3_register_ae_algo(&ae_algovf);
3913 
3914 	return 0;
3915 }
3916 
3917 static void hclgevf_exit(void)
3918 {
3919 	hnae3_unregister_ae_algo(&ae_algovf);
3920 	destroy_workqueue(hclgevf_wq);
3921 }
3922 module_init(hclgevf_init);
3923 module_exit(hclgevf_exit);
3924 
3925 MODULE_LICENSE("GPL");
3926 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3927 MODULE_DESCRIPTION("HCLGEVF Driver");
3928 MODULE_VERSION(HCLGEVF_MOD_VERSION);
3929