1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/etherdevice.h>
5 #include <linux/iopoll.h>
6 #include <net/rtnetlink.h>
7 #include "hclgevf_cmd.h"
8 #include "hclgevf_main.h"
9 #include "hclge_mbx.h"
10 #include "hnae3.h"
11 
12 #define HCLGEVF_NAME	"hclgevf"
13 
14 #define HCLGEVF_RESET_MAX_FAIL_CNT	5
15 
16 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
17 static struct hnae3_ae_algo ae_algovf;
18 
19 static struct workqueue_struct *hclgevf_wq;
20 
21 static const struct pci_device_id ae_algovf_pci_tbl[] = {
22 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
23 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
24 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
25 	/* required last entry */
26 	{0, }
27 };
28 
29 static const u8 hclgevf_hash_key[] = {
30 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
31 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
32 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
33 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
34 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
35 };
36 
37 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
38 
39 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
40 					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
41 					 HCLGEVF_CMDQ_TX_DEPTH_REG,
42 					 HCLGEVF_CMDQ_TX_TAIL_REG,
43 					 HCLGEVF_CMDQ_TX_HEAD_REG,
44 					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
45 					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
46 					 HCLGEVF_CMDQ_RX_DEPTH_REG,
47 					 HCLGEVF_CMDQ_RX_TAIL_REG,
48 					 HCLGEVF_CMDQ_RX_HEAD_REG,
49 					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
50 					 HCLGEVF_VECTOR0_CMDQ_STATE_REG,
51 					 HCLGEVF_CMDQ_INTR_EN_REG,
52 					 HCLGEVF_CMDQ_INTR_GEN_REG};
53 
54 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
55 					   HCLGEVF_RST_ING,
56 					   HCLGEVF_GRO_EN_REG};
57 
58 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
59 					 HCLGEVF_RING_RX_ADDR_H_REG,
60 					 HCLGEVF_RING_RX_BD_NUM_REG,
61 					 HCLGEVF_RING_RX_BD_LENGTH_REG,
62 					 HCLGEVF_RING_RX_MERGE_EN_REG,
63 					 HCLGEVF_RING_RX_TAIL_REG,
64 					 HCLGEVF_RING_RX_HEAD_REG,
65 					 HCLGEVF_RING_RX_FBD_NUM_REG,
66 					 HCLGEVF_RING_RX_OFFSET_REG,
67 					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
68 					 HCLGEVF_RING_RX_STASH_REG,
69 					 HCLGEVF_RING_RX_BD_ERR_REG,
70 					 HCLGEVF_RING_TX_ADDR_L_REG,
71 					 HCLGEVF_RING_TX_ADDR_H_REG,
72 					 HCLGEVF_RING_TX_BD_NUM_REG,
73 					 HCLGEVF_RING_TX_PRIORITY_REG,
74 					 HCLGEVF_RING_TX_TC_REG,
75 					 HCLGEVF_RING_TX_MERGE_EN_REG,
76 					 HCLGEVF_RING_TX_TAIL_REG,
77 					 HCLGEVF_RING_TX_HEAD_REG,
78 					 HCLGEVF_RING_TX_FBD_NUM_REG,
79 					 HCLGEVF_RING_TX_OFFSET_REG,
80 					 HCLGEVF_RING_TX_EBD_NUM_REG,
81 					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
82 					 HCLGEVF_RING_TX_BD_ERR_REG,
83 					 HCLGEVF_RING_EN_REG};
84 
85 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
86 					     HCLGEVF_TQP_INTR_GL0_REG,
87 					     HCLGEVF_TQP_INTR_GL1_REG,
88 					     HCLGEVF_TQP_INTR_GL2_REG,
89 					     HCLGEVF_TQP_INTR_RL_REG};
90 
91 static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
92 {
93 	if (!handle->client)
94 		return container_of(handle, struct hclgevf_dev, nic);
95 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
96 		return container_of(handle, struct hclgevf_dev, roce);
97 	else
98 		return container_of(handle, struct hclgevf_dev, nic);
99 }
100 
101 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
102 {
103 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
104 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
105 	struct hclgevf_desc desc;
106 	struct hclgevf_tqp *tqp;
107 	int status;
108 	int i;
109 
110 	for (i = 0; i < kinfo->num_tqps; i++) {
111 		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
112 		hclgevf_cmd_setup_basic_desc(&desc,
113 					     HCLGEVF_OPC_QUERY_RX_STATUS,
114 					     true);
115 
116 		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
117 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
118 		if (status) {
119 			dev_err(&hdev->pdev->dev,
120 				"Query tqp stat fail, status = %d,queue = %d\n",
121 				status,	i);
122 			return status;
123 		}
124 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
125 			le32_to_cpu(desc.data[1]);
126 
127 		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
128 					     true);
129 
130 		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
131 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
132 		if (status) {
133 			dev_err(&hdev->pdev->dev,
134 				"Query tqp stat fail, status = %d,queue = %d\n",
135 				status, i);
136 			return status;
137 		}
138 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
139 			le32_to_cpu(desc.data[1]);
140 	}
141 
142 	return 0;
143 }
144 
145 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
146 {
147 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
148 	struct hclgevf_tqp *tqp;
149 	u64 *buff = data;
150 	int i;
151 
152 	for (i = 0; i < kinfo->num_tqps; i++) {
153 		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
154 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
155 	}
156 	for (i = 0; i < kinfo->num_tqps; i++) {
157 		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
158 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
159 	}
160 
161 	return buff;
162 }
163 
164 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
165 {
166 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
167 
168 	return kinfo->num_tqps * 2;
169 }
170 
171 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
172 {
173 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
174 	u8 *buff = data;
175 	int i;
176 
177 	for (i = 0; i < kinfo->num_tqps; i++) {
178 		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
179 						       struct hclgevf_tqp, q);
180 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
181 			 tqp->index);
182 		buff += ETH_GSTRING_LEN;
183 	}
184 
185 	for (i = 0; i < kinfo->num_tqps; i++) {
186 		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
187 						       struct hclgevf_tqp, q);
188 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
189 			 tqp->index);
190 		buff += ETH_GSTRING_LEN;
191 	}
192 
193 	return buff;
194 }
195 
196 static void hclgevf_update_stats(struct hnae3_handle *handle,
197 				 struct net_device_stats *net_stats)
198 {
199 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
200 	int status;
201 
202 	status = hclgevf_tqps_update_stats(handle);
203 	if (status)
204 		dev_err(&hdev->pdev->dev,
205 			"VF update of TQPS stats fail, status = %d.\n",
206 			status);
207 }
208 
209 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
210 {
211 	if (strset == ETH_SS_TEST)
212 		return -EOPNOTSUPP;
213 	else if (strset == ETH_SS_STATS)
214 		return hclgevf_tqps_get_sset_count(handle, strset);
215 
216 	return 0;
217 }
218 
219 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
220 				u8 *data)
221 {
222 	u8 *p = (char *)data;
223 
224 	if (strset == ETH_SS_STATS)
225 		p = hclgevf_tqps_get_strings(handle, p);
226 }
227 
228 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
229 {
230 	hclgevf_tqps_get_stats(handle, data);
231 }
232 
233 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
234 				   u8 subcode)
235 {
236 	if (msg) {
237 		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
238 		msg->code = code;
239 		msg->subcode = subcode;
240 	}
241 }
242 
243 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
244 {
245 	struct hclge_vf_to_pf_msg send_msg;
246 	u8 resp_msg;
247 	int status;
248 
249 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
250 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
251 				      sizeof(resp_msg));
252 	if (status) {
253 		dev_err(&hdev->pdev->dev,
254 			"VF request to get TC info from PF failed %d",
255 			status);
256 		return status;
257 	}
258 
259 	hdev->hw_tc_map = resp_msg;
260 
261 	return 0;
262 }
263 
264 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
265 {
266 	struct hnae3_handle *nic = &hdev->nic;
267 	struct hclge_vf_to_pf_msg send_msg;
268 	u8 resp_msg;
269 	int ret;
270 
271 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
272 			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
273 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
274 				   sizeof(u8));
275 	if (ret) {
276 		dev_err(&hdev->pdev->dev,
277 			"VF request to get port based vlan state failed %d",
278 			ret);
279 		return ret;
280 	}
281 
282 	nic->port_base_vlan_state = resp_msg;
283 
284 	return 0;
285 }
286 
287 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
288 {
289 #define HCLGEVF_TQPS_RSS_INFO_LEN	6
290 #define HCLGEVF_TQPS_ALLOC_OFFSET	0
291 #define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
292 #define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4
293 
294 	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
295 	struct hclge_vf_to_pf_msg send_msg;
296 	int status;
297 
298 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
299 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
300 				      HCLGEVF_TQPS_RSS_INFO_LEN);
301 	if (status) {
302 		dev_err(&hdev->pdev->dev,
303 			"VF request to get tqp info from PF failed %d",
304 			status);
305 		return status;
306 	}
307 
308 	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
309 	       sizeof(u16));
310 	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
311 	       sizeof(u16));
312 	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
313 	       sizeof(u16));
314 
315 	return 0;
316 }
317 
318 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
319 {
320 #define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
321 #define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
322 #define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2
323 
324 	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
325 	struct hclge_vf_to_pf_msg send_msg;
326 	int ret;
327 
328 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
329 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
330 				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
331 	if (ret) {
332 		dev_err(&hdev->pdev->dev,
333 			"VF request to get tqp depth info from PF failed %d",
334 			ret);
335 		return ret;
336 	}
337 
338 	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
339 	       sizeof(u16));
340 	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
341 	       sizeof(u16));
342 
343 	return 0;
344 }
345 
346 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
347 {
348 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
349 	struct hclge_vf_to_pf_msg send_msg;
350 	u16 qid_in_pf = 0;
351 	u8 resp_data[2];
352 	int ret;
353 
354 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
355 	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
356 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
357 				   sizeof(resp_data));
358 	if (!ret)
359 		qid_in_pf = *(u16 *)resp_data;
360 
361 	return qid_in_pf;
362 }
363 
364 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
365 {
366 	struct hclge_vf_to_pf_msg send_msg;
367 	u8 resp_msg[2];
368 	int ret;
369 
370 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
371 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
372 				   sizeof(resp_msg));
373 	if (ret) {
374 		dev_err(&hdev->pdev->dev,
375 			"VF request to get the pf port media type failed %d",
376 			ret);
377 		return ret;
378 	}
379 
380 	hdev->hw.mac.media_type = resp_msg[0];
381 	hdev->hw.mac.module_type = resp_msg[1];
382 
383 	return 0;
384 }
385 
386 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
387 {
388 	struct hclgevf_tqp *tqp;
389 	int i;
390 
391 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
392 				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
393 	if (!hdev->htqp)
394 		return -ENOMEM;
395 
396 	tqp = hdev->htqp;
397 
398 	for (i = 0; i < hdev->num_tqps; i++) {
399 		tqp->dev = &hdev->pdev->dev;
400 		tqp->index = i;
401 
402 		tqp->q.ae_algo = &ae_algovf;
403 		tqp->q.buf_size = hdev->rx_buf_len;
404 		tqp->q.tx_desc_num = hdev->num_tx_desc;
405 		tqp->q.rx_desc_num = hdev->num_rx_desc;
406 		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
407 			i * HCLGEVF_TQP_REG_SIZE;
408 
409 		tqp++;
410 	}
411 
412 	return 0;
413 }
414 
415 static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
416 {
417 	struct hnae3_handle *nic = &hdev->nic;
418 	struct hnae3_knic_private_info *kinfo;
419 	u16 new_tqps = hdev->num_tqps;
420 	unsigned int i;
421 
422 	kinfo = &nic->kinfo;
423 	kinfo->num_tc = 0;
424 	kinfo->num_tx_desc = hdev->num_tx_desc;
425 	kinfo->num_rx_desc = hdev->num_rx_desc;
426 	kinfo->rx_buf_len = hdev->rx_buf_len;
427 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
428 		if (hdev->hw_tc_map & BIT(i))
429 			kinfo->num_tc++;
430 
431 	kinfo->rss_size
432 		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
433 	new_tqps = kinfo->rss_size * kinfo->num_tc;
434 	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
435 
436 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
437 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
438 	if (!kinfo->tqp)
439 		return -ENOMEM;
440 
441 	for (i = 0; i < kinfo->num_tqps; i++) {
442 		hdev->htqp[i].q.handle = &hdev->nic;
443 		hdev->htqp[i].q.tqp_index = i;
444 		kinfo->tqp[i] = &hdev->htqp[i].q;
445 	}
446 
447 	/* after init the max rss_size and tqps, adjust the default tqp numbers
448 	 * and rss size with the actual vector numbers
449 	 */
450 	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
451 	kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
452 				kinfo->rss_size);
453 
454 	return 0;
455 }
456 
457 static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
458 {
459 	struct hclge_vf_to_pf_msg send_msg;
460 	int status;
461 
462 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
463 	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
464 	if (status)
465 		dev_err(&hdev->pdev->dev,
466 			"VF failed to fetch link status(%d) from PF", status);
467 }
468 
469 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
470 {
471 	struct hnae3_handle *rhandle = &hdev->roce;
472 	struct hnae3_handle *handle = &hdev->nic;
473 	struct hnae3_client *rclient;
474 	struct hnae3_client *client;
475 
476 	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
477 		return;
478 
479 	client = handle->client;
480 	rclient = hdev->roce_client;
481 
482 	link_state =
483 		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
484 
485 	if (link_state != hdev->hw.mac.link) {
486 		client->ops->link_status_change(handle, !!link_state);
487 		if (rclient && rclient->ops->link_status_change)
488 			rclient->ops->link_status_change(rhandle, !!link_state);
489 		hdev->hw.mac.link = link_state;
490 	}
491 
492 	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
493 }
494 
495 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
496 {
497 #define HCLGEVF_ADVERTISING	0
498 #define HCLGEVF_SUPPORTED	1
499 
500 	struct hclge_vf_to_pf_msg send_msg;
501 
502 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
503 	send_msg.data[0] = HCLGEVF_ADVERTISING;
504 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
505 	send_msg.data[0] = HCLGEVF_SUPPORTED;
506 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
507 }
508 
509 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
510 {
511 	struct hnae3_handle *nic = &hdev->nic;
512 	int ret;
513 
514 	nic->ae_algo = &ae_algovf;
515 	nic->pdev = hdev->pdev;
516 	nic->numa_node_mask = hdev->numa_node_mask;
517 	nic->flags |= HNAE3_SUPPORT_VF;
518 
519 	ret = hclgevf_knic_setup(hdev);
520 	if (ret)
521 		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
522 			ret);
523 	return ret;
524 }
525 
526 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
527 {
528 	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
529 		dev_warn(&hdev->pdev->dev,
530 			 "vector(vector_id %d) has been freed.\n", vector_id);
531 		return;
532 	}
533 
534 	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
535 	hdev->num_msi_left += 1;
536 	hdev->num_msi_used -= 1;
537 }
538 
539 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
540 			      struct hnae3_vector_info *vector_info)
541 {
542 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
543 	struct hnae3_vector_info *vector = vector_info;
544 	int alloc = 0;
545 	int i, j;
546 
547 	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
548 	vector_num = min(hdev->num_msi_left, vector_num);
549 
550 	for (j = 0; j < vector_num; j++) {
551 		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
552 			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
553 				vector->vector = pci_irq_vector(hdev->pdev, i);
554 				vector->io_addr = hdev->hw.io_base +
555 					HCLGEVF_VECTOR_REG_BASE +
556 					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
557 				hdev->vector_status[i] = 0;
558 				hdev->vector_irq[i] = vector->vector;
559 
560 				vector++;
561 				alloc++;
562 
563 				break;
564 			}
565 		}
566 	}
567 	hdev->num_msi_left -= alloc;
568 	hdev->num_msi_used += alloc;
569 
570 	return alloc;
571 }
572 
573 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
574 {
575 	int i;
576 
577 	for (i = 0; i < hdev->num_msi; i++)
578 		if (vector == hdev->vector_irq[i])
579 			return i;
580 
581 	return -EINVAL;
582 }
583 
584 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
585 				    const u8 hfunc, const u8 *key)
586 {
587 	struct hclgevf_rss_config_cmd *req;
588 	unsigned int key_offset = 0;
589 	struct hclgevf_desc desc;
590 	int key_counts;
591 	int key_size;
592 	int ret;
593 
594 	key_counts = HCLGEVF_RSS_KEY_SIZE;
595 	req = (struct hclgevf_rss_config_cmd *)desc.data;
596 
597 	while (key_counts) {
598 		hclgevf_cmd_setup_basic_desc(&desc,
599 					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
600 					     false);
601 
602 		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
603 		req->hash_config |=
604 			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
605 
606 		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
607 		memcpy(req->hash_key,
608 		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
609 
610 		key_counts -= key_size;
611 		key_offset++;
612 		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
613 		if (ret) {
614 			dev_err(&hdev->pdev->dev,
615 				"Configure RSS config fail, status = %d\n",
616 				ret);
617 			return ret;
618 		}
619 	}
620 
621 	return 0;
622 }
623 
624 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
625 {
626 	return HCLGEVF_RSS_KEY_SIZE;
627 }
628 
629 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
630 {
631 	return HCLGEVF_RSS_IND_TBL_SIZE;
632 }
633 
634 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
635 {
636 	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
637 	struct hclgevf_rss_indirection_table_cmd *req;
638 	struct hclgevf_desc desc;
639 	int status;
640 	int i, j;
641 
642 	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
643 
644 	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
645 		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
646 					     false);
647 		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
648 		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
649 		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
650 			req->rss_result[j] =
651 				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
652 
653 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
654 		if (status) {
655 			dev_err(&hdev->pdev->dev,
656 				"VF failed(=%d) to set RSS indirection table\n",
657 				status);
658 			return status;
659 		}
660 	}
661 
662 	return 0;
663 }
664 
665 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev,  u16 rss_size)
666 {
667 	struct hclgevf_rss_tc_mode_cmd *req;
668 	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
669 	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
670 	u16 tc_size[HCLGEVF_MAX_TC_NUM];
671 	struct hclgevf_desc desc;
672 	u16 roundup_size;
673 	unsigned int i;
674 	int status;
675 
676 	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
677 
678 	roundup_size = roundup_pow_of_two(rss_size);
679 	roundup_size = ilog2(roundup_size);
680 
681 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
682 		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
683 		tc_size[i] = roundup_size;
684 		tc_offset[i] = rss_size * i;
685 	}
686 
687 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
688 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
689 		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
690 			      (tc_valid[i] & 0x1));
691 		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
692 				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
693 		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
694 				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
695 	}
696 	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
697 	if (status)
698 		dev_err(&hdev->pdev->dev,
699 			"VF failed(=%d) to set rss tc mode\n", status);
700 
701 	return status;
702 }
703 
704 /* for revision 0x20, vf shared the same rss config with pf */
705 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
706 {
707 #define HCLGEVF_RSS_MBX_RESP_LEN	8
708 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
709 	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
710 	struct hclge_vf_to_pf_msg send_msg;
711 	u16 msg_num, hash_key_index;
712 	u8 index;
713 	int ret;
714 
715 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
716 	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
717 			HCLGEVF_RSS_MBX_RESP_LEN;
718 	for (index = 0; index < msg_num; index++) {
719 		send_msg.data[0] = index;
720 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
721 					   HCLGEVF_RSS_MBX_RESP_LEN);
722 		if (ret) {
723 			dev_err(&hdev->pdev->dev,
724 				"VF get rss hash key from PF failed, ret=%d",
725 				ret);
726 			return ret;
727 		}
728 
729 		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
730 		if (index == msg_num - 1)
731 			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
732 			       &resp_msg[0],
733 			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
734 		else
735 			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
736 			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
737 	}
738 
739 	return 0;
740 }
741 
742 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
743 			   u8 *hfunc)
744 {
745 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
746 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
747 	int i, ret;
748 
749 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
750 		/* Get hash algorithm */
751 		if (hfunc) {
752 			switch (rss_cfg->hash_algo) {
753 			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
754 				*hfunc = ETH_RSS_HASH_TOP;
755 				break;
756 			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
757 				*hfunc = ETH_RSS_HASH_XOR;
758 				break;
759 			default:
760 				*hfunc = ETH_RSS_HASH_UNKNOWN;
761 				break;
762 			}
763 		}
764 
765 		/* Get the RSS Key required by the user */
766 		if (key)
767 			memcpy(key, rss_cfg->rss_hash_key,
768 			       HCLGEVF_RSS_KEY_SIZE);
769 	} else {
770 		if (hfunc)
771 			*hfunc = ETH_RSS_HASH_TOP;
772 		if (key) {
773 			ret = hclgevf_get_rss_hash_key(hdev);
774 			if (ret)
775 				return ret;
776 			memcpy(key, rss_cfg->rss_hash_key,
777 			       HCLGEVF_RSS_KEY_SIZE);
778 		}
779 	}
780 
781 	if (indir)
782 		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
783 			indir[i] = rss_cfg->rss_indirection_tbl[i];
784 
785 	return 0;
786 }
787 
788 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
789 			   const u8 *key, const u8 hfunc)
790 {
791 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
792 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
793 	int ret, i;
794 
795 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
796 		/* Set the RSS Hash Key if specififed by the user */
797 		if (key) {
798 			switch (hfunc) {
799 			case ETH_RSS_HASH_TOP:
800 				rss_cfg->hash_algo =
801 					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
802 				break;
803 			case ETH_RSS_HASH_XOR:
804 				rss_cfg->hash_algo =
805 					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
806 				break;
807 			case ETH_RSS_HASH_NO_CHANGE:
808 				break;
809 			default:
810 				return -EINVAL;
811 			}
812 
813 			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
814 						       key);
815 			if (ret)
816 				return ret;
817 
818 			/* Update the shadow RSS key with user specified qids */
819 			memcpy(rss_cfg->rss_hash_key, key,
820 			       HCLGEVF_RSS_KEY_SIZE);
821 		}
822 	}
823 
824 	/* update the shadow RSS table with user specified qids */
825 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
826 		rss_cfg->rss_indirection_tbl[i] = indir[i];
827 
828 	/* update the hardware */
829 	return hclgevf_set_rss_indir_table(hdev);
830 }
831 
832 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
833 {
834 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
835 
836 	if (nfc->data & RXH_L4_B_2_3)
837 		hash_sets |= HCLGEVF_D_PORT_BIT;
838 	else
839 		hash_sets &= ~HCLGEVF_D_PORT_BIT;
840 
841 	if (nfc->data & RXH_IP_SRC)
842 		hash_sets |= HCLGEVF_S_IP_BIT;
843 	else
844 		hash_sets &= ~HCLGEVF_S_IP_BIT;
845 
846 	if (nfc->data & RXH_IP_DST)
847 		hash_sets |= HCLGEVF_D_IP_BIT;
848 	else
849 		hash_sets &= ~HCLGEVF_D_IP_BIT;
850 
851 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
852 		hash_sets |= HCLGEVF_V_TAG_BIT;
853 
854 	return hash_sets;
855 }
856 
857 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
858 				 struct ethtool_rxnfc *nfc)
859 {
860 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
861 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
862 	struct hclgevf_rss_input_tuple_cmd *req;
863 	struct hclgevf_desc desc;
864 	u8 tuple_sets;
865 	int ret;
866 
867 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
868 		return -EOPNOTSUPP;
869 
870 	if (nfc->data &
871 	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
872 		return -EINVAL;
873 
874 	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
875 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
876 
877 	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
878 	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
879 	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
880 	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
881 	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
882 	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
883 	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
884 	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
885 
886 	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
887 	switch (nfc->flow_type) {
888 	case TCP_V4_FLOW:
889 		req->ipv4_tcp_en = tuple_sets;
890 		break;
891 	case TCP_V6_FLOW:
892 		req->ipv6_tcp_en = tuple_sets;
893 		break;
894 	case UDP_V4_FLOW:
895 		req->ipv4_udp_en = tuple_sets;
896 		break;
897 	case UDP_V6_FLOW:
898 		req->ipv6_udp_en = tuple_sets;
899 		break;
900 	case SCTP_V4_FLOW:
901 		req->ipv4_sctp_en = tuple_sets;
902 		break;
903 	case SCTP_V6_FLOW:
904 		if ((nfc->data & RXH_L4_B_0_1) ||
905 		    (nfc->data & RXH_L4_B_2_3))
906 			return -EINVAL;
907 
908 		req->ipv6_sctp_en = tuple_sets;
909 		break;
910 	case IPV4_FLOW:
911 		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
912 		break;
913 	case IPV6_FLOW:
914 		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
915 		break;
916 	default:
917 		return -EINVAL;
918 	}
919 
920 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
921 	if (ret) {
922 		dev_err(&hdev->pdev->dev,
923 			"Set rss tuple fail, status = %d\n", ret);
924 		return ret;
925 	}
926 
927 	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
928 	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
929 	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
930 	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
931 	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
932 	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
933 	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
934 	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
935 	return 0;
936 }
937 
938 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
939 				 struct ethtool_rxnfc *nfc)
940 {
941 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
942 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
943 	u8 tuple_sets;
944 
945 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
946 		return -EOPNOTSUPP;
947 
948 	nfc->data = 0;
949 
950 	switch (nfc->flow_type) {
951 	case TCP_V4_FLOW:
952 		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
953 		break;
954 	case UDP_V4_FLOW:
955 		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
956 		break;
957 	case TCP_V6_FLOW:
958 		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
959 		break;
960 	case UDP_V6_FLOW:
961 		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
962 		break;
963 	case SCTP_V4_FLOW:
964 		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
965 		break;
966 	case SCTP_V6_FLOW:
967 		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
968 		break;
969 	case IPV4_FLOW:
970 	case IPV6_FLOW:
971 		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
972 		break;
973 	default:
974 		return -EINVAL;
975 	}
976 
977 	if (!tuple_sets)
978 		return 0;
979 
980 	if (tuple_sets & HCLGEVF_D_PORT_BIT)
981 		nfc->data |= RXH_L4_B_2_3;
982 	if (tuple_sets & HCLGEVF_S_PORT_BIT)
983 		nfc->data |= RXH_L4_B_0_1;
984 	if (tuple_sets & HCLGEVF_D_IP_BIT)
985 		nfc->data |= RXH_IP_DST;
986 	if (tuple_sets & HCLGEVF_S_IP_BIT)
987 		nfc->data |= RXH_IP_SRC;
988 
989 	return 0;
990 }
991 
992 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
993 				       struct hclgevf_rss_cfg *rss_cfg)
994 {
995 	struct hclgevf_rss_input_tuple_cmd *req;
996 	struct hclgevf_desc desc;
997 	int ret;
998 
999 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
1000 
1001 	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
1002 
1003 	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
1004 	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
1005 	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
1006 	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
1007 	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
1008 	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
1009 	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
1010 	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
1011 
1012 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
1013 	if (ret)
1014 		dev_err(&hdev->pdev->dev,
1015 			"Configure rss input fail, status = %d\n", ret);
1016 	return ret;
1017 }
1018 
1019 static int hclgevf_get_tc_size(struct hnae3_handle *handle)
1020 {
1021 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1022 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
1023 
1024 	return rss_cfg->rss_size;
1025 }
1026 
1027 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
1028 				       int vector_id,
1029 				       struct hnae3_ring_chain_node *ring_chain)
1030 {
1031 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1032 	struct hclge_vf_to_pf_msg send_msg;
1033 	struct hnae3_ring_chain_node *node;
1034 	int status;
1035 	int i = 0;
1036 
1037 	memset(&send_msg, 0, sizeof(send_msg));
1038 	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
1039 		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
1040 	send_msg.vector_id = vector_id;
1041 
1042 	for (node = ring_chain; node; node = node->next) {
1043 		send_msg.param[i].ring_type =
1044 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
1045 
1046 		send_msg.param[i].tqp_index = node->tqp_index;
1047 		send_msg.param[i].int_gl_index =
1048 					hnae3_get_field(node->int_gl_idx,
1049 							HNAE3_RING_GL_IDX_M,
1050 							HNAE3_RING_GL_IDX_S);
1051 
1052 		i++;
1053 		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
1054 			send_msg.ring_num = i;
1055 
1056 			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
1057 						      NULL, 0);
1058 			if (status) {
1059 				dev_err(&hdev->pdev->dev,
1060 					"Map TQP fail, status is %d.\n",
1061 					status);
1062 				return status;
1063 			}
1064 			i = 0;
1065 		}
1066 	}
1067 
1068 	return 0;
1069 }
1070 
1071 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
1072 				      struct hnae3_ring_chain_node *ring_chain)
1073 {
1074 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1075 	int vector_id;
1076 
1077 	vector_id = hclgevf_get_vector_index(hdev, vector);
1078 	if (vector_id < 0) {
1079 		dev_err(&handle->pdev->dev,
1080 			"Get vector index fail. ret =%d\n", vector_id);
1081 		return vector_id;
1082 	}
1083 
1084 	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
1085 }
1086 
1087 static int hclgevf_unmap_ring_from_vector(
1088 				struct hnae3_handle *handle,
1089 				int vector,
1090 				struct hnae3_ring_chain_node *ring_chain)
1091 {
1092 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1093 	int ret, vector_id;
1094 
1095 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
1096 		return 0;
1097 
1098 	vector_id = hclgevf_get_vector_index(hdev, vector);
1099 	if (vector_id < 0) {
1100 		dev_err(&handle->pdev->dev,
1101 			"Get vector index fail. ret =%d\n", vector_id);
1102 		return vector_id;
1103 	}
1104 
1105 	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
1106 	if (ret)
1107 		dev_err(&handle->pdev->dev,
1108 			"Unmap ring from vector fail. vector=%d, ret =%d\n",
1109 			vector_id,
1110 			ret);
1111 
1112 	return ret;
1113 }
1114 
1115 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
1116 {
1117 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1118 	int vector_id;
1119 
1120 	vector_id = hclgevf_get_vector_index(hdev, vector);
1121 	if (vector_id < 0) {
1122 		dev_err(&handle->pdev->dev,
1123 			"hclgevf_put_vector get vector index fail. ret =%d\n",
1124 			vector_id);
1125 		return vector_id;
1126 	}
1127 
1128 	hclgevf_free_vector(hdev, vector_id);
1129 
1130 	return 0;
1131 }
1132 
1133 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
1134 					bool en_uc_pmc, bool en_mc_pmc,
1135 					bool en_bc_pmc)
1136 {
1137 	struct hclge_vf_to_pf_msg send_msg;
1138 	int ret;
1139 
1140 	memset(&send_msg, 0, sizeof(send_msg));
1141 	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
1142 	send_msg.en_bc = en_bc_pmc ? 1 : 0;
1143 	send_msg.en_uc = en_uc_pmc ? 1 : 0;
1144 	send_msg.en_mc = en_mc_pmc ? 1 : 0;
1145 
1146 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1147 	if (ret)
1148 		dev_err(&hdev->pdev->dev,
1149 			"Set promisc mode fail, status is %d.\n", ret);
1150 
1151 	return ret;
1152 }
1153 
1154 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
1155 				    bool en_mc_pmc)
1156 {
1157 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1158 	bool en_bc_pmc;
1159 
1160 	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
1161 
1162 	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
1163 					    en_bc_pmc);
1164 }
1165 
1166 static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
1167 {
1168 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1169 
1170 	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
1171 }
1172 
1173 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
1174 {
1175 	struct hnae3_handle *handle = &hdev->nic;
1176 	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
1177 	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
1178 	int ret;
1179 
1180 	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
1181 		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
1182 		if (!ret)
1183 			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
1184 	}
1185 }
1186 
1187 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
1188 			      int stream_id, bool enable)
1189 {
1190 	struct hclgevf_cfg_com_tqp_queue_cmd *req;
1191 	struct hclgevf_desc desc;
1192 	int status;
1193 
1194 	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
1195 
1196 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
1197 				     false);
1198 	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
1199 	req->stream_id = cpu_to_le16(stream_id);
1200 	if (enable)
1201 		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
1202 
1203 	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
1204 	if (status)
1205 		dev_err(&hdev->pdev->dev,
1206 			"TQP enable fail, status =%d.\n", status);
1207 
1208 	return status;
1209 }
1210 
1211 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
1212 {
1213 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
1214 	struct hclgevf_tqp *tqp;
1215 	int i;
1216 
1217 	for (i = 0; i < kinfo->num_tqps; i++) {
1218 		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
1219 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
1220 	}
1221 }
1222 
1223 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
1224 {
1225 	struct hclge_vf_to_pf_msg send_msg;
1226 	u8 host_mac[ETH_ALEN];
1227 	int status;
1228 
1229 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
1230 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
1231 				      ETH_ALEN);
1232 	if (status) {
1233 		dev_err(&hdev->pdev->dev,
1234 			"fail to get VF MAC from host %d", status);
1235 		return status;
1236 	}
1237 
1238 	ether_addr_copy(p, host_mac);
1239 
1240 	return 0;
1241 }
1242 
1243 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
1244 {
1245 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1246 	u8 host_mac_addr[ETH_ALEN];
1247 
1248 	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
1249 		return;
1250 
1251 	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
1252 	if (hdev->has_pf_mac)
1253 		ether_addr_copy(p, host_mac_addr);
1254 	else
1255 		ether_addr_copy(p, hdev->hw.mac.mac_addr);
1256 }
1257 
1258 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
1259 				bool is_first)
1260 {
1261 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1262 	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
1263 	struct hclge_vf_to_pf_msg send_msg;
1264 	u8 *new_mac_addr = (u8 *)p;
1265 	int status;
1266 
1267 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
1268 	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
1269 	ether_addr_copy(send_msg.data, new_mac_addr);
1270 	if (is_first && !hdev->has_pf_mac)
1271 		eth_zero_addr(&send_msg.data[ETH_ALEN]);
1272 	else
1273 		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
1274 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1275 	if (!status)
1276 		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
1277 
1278 	return status;
1279 }
1280 
1281 static struct hclgevf_mac_addr_node *
1282 hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
1283 {
1284 	struct hclgevf_mac_addr_node *mac_node, *tmp;
1285 
1286 	list_for_each_entry_safe(mac_node, tmp, list, node)
1287 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
1288 			return mac_node;
1289 
1290 	return NULL;
1291 }
1292 
1293 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
1294 				    enum HCLGEVF_MAC_NODE_STATE state)
1295 {
1296 	switch (state) {
1297 	/* from set_rx_mode or tmp_add_list */
1298 	case HCLGEVF_MAC_TO_ADD:
1299 		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
1300 			mac_node->state = HCLGEVF_MAC_ACTIVE;
1301 		break;
1302 	/* only from set_rx_mode */
1303 	case HCLGEVF_MAC_TO_DEL:
1304 		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
1305 			list_del(&mac_node->node);
1306 			kfree(mac_node);
1307 		} else {
1308 			mac_node->state = HCLGEVF_MAC_TO_DEL;
1309 		}
1310 		break;
1311 	/* only from tmp_add_list, the mac_node->state won't be
1312 	 * HCLGEVF_MAC_ACTIVE
1313 	 */
1314 	case HCLGEVF_MAC_ACTIVE:
1315 		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
1316 			mac_node->state = HCLGEVF_MAC_ACTIVE;
1317 		break;
1318 	}
1319 }
1320 
1321 static int hclgevf_update_mac_list(struct hnae3_handle *handle,
1322 				   enum HCLGEVF_MAC_NODE_STATE state,
1323 				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
1324 				   const unsigned char *addr)
1325 {
1326 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1327 	struct hclgevf_mac_addr_node *mac_node;
1328 	struct list_head *list;
1329 
1330 	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
1331 	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
1332 
1333 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1334 
1335 	/* if the mac addr is already in the mac list, no need to add a new
1336 	 * one into it, just check the mac addr state, convert it to a new
1337 	 * new state, or just remove it, or do nothing.
1338 	 */
1339 	mac_node = hclgevf_find_mac_node(list, addr);
1340 	if (mac_node) {
1341 		hclgevf_update_mac_node(mac_node, state);
1342 		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1343 		return 0;
1344 	}
1345 	/* if this address is never added, unnecessary to delete */
1346 	if (state == HCLGEVF_MAC_TO_DEL) {
1347 		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1348 		return -ENOENT;
1349 	}
1350 
1351 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
1352 	if (!mac_node) {
1353 		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1354 		return -ENOMEM;
1355 	}
1356 
1357 	mac_node->state = state;
1358 	ether_addr_copy(mac_node->mac_addr, addr);
1359 	list_add_tail(&mac_node->node, list);
1360 
1361 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1362 	return 0;
1363 }
1364 
1365 static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
1366 			       const unsigned char *addr)
1367 {
1368 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
1369 				       HCLGEVF_MAC_ADDR_UC, addr);
1370 }
1371 
1372 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
1373 			      const unsigned char *addr)
1374 {
1375 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
1376 				       HCLGEVF_MAC_ADDR_UC, addr);
1377 }
1378 
1379 static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
1380 			       const unsigned char *addr)
1381 {
1382 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
1383 				       HCLGEVF_MAC_ADDR_MC, addr);
1384 }
1385 
1386 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
1387 			      const unsigned char *addr)
1388 {
1389 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
1390 				       HCLGEVF_MAC_ADDR_MC, addr);
1391 }
1392 
1393 static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
1394 				    struct hclgevf_mac_addr_node *mac_node,
1395 				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
1396 {
1397 	struct hclge_vf_to_pf_msg send_msg;
1398 	u8 code, subcode;
1399 
1400 	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
1401 		code = HCLGE_MBX_SET_UNICAST;
1402 		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
1403 			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
1404 		else
1405 			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
1406 	} else {
1407 		code = HCLGE_MBX_SET_MULTICAST;
1408 		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
1409 			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
1410 		else
1411 			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
1412 	}
1413 
1414 	hclgevf_build_send_msg(&send_msg, code, subcode);
1415 	ether_addr_copy(send_msg.data, mac_node->mac_addr);
1416 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1417 }
1418 
1419 static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
1420 				    struct list_head *list,
1421 				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
1422 {
1423 	struct hclgevf_mac_addr_node *mac_node, *tmp;
1424 	int ret;
1425 
1426 	list_for_each_entry_safe(mac_node, tmp, list, node) {
1427 		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
1428 		if  (ret) {
1429 			dev_err(&hdev->pdev->dev,
1430 				"failed to configure mac %pM, state = %d, ret = %d\n",
1431 				mac_node->mac_addr, mac_node->state, ret);
1432 			return;
1433 		}
1434 		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
1435 			mac_node->state = HCLGEVF_MAC_ACTIVE;
1436 		} else {
1437 			list_del(&mac_node->node);
1438 			kfree(mac_node);
1439 		}
1440 	}
1441 }
1442 
1443 static void hclgevf_sync_from_add_list(struct list_head *add_list,
1444 				       struct list_head *mac_list)
1445 {
1446 	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
1447 
1448 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
1449 		/* if the mac address from tmp_add_list is not in the
1450 		 * uc/mc_mac_list, it means have received a TO_DEL request
1451 		 * during the time window of sending mac config request to PF
1452 		 * If mac_node state is ACTIVE, then change its state to TO_DEL,
1453 		 * then it will be removed at next time. If is TO_ADD, it means
1454 		 * send TO_ADD request failed, so just remove the mac node.
1455 		 */
1456 		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
1457 		if (new_node) {
1458 			hclgevf_update_mac_node(new_node, mac_node->state);
1459 			list_del(&mac_node->node);
1460 			kfree(mac_node);
1461 		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
1462 			mac_node->state = HCLGEVF_MAC_TO_DEL;
1463 			list_del(&mac_node->node);
1464 			list_add_tail(&mac_node->node, mac_list);
1465 		} else {
1466 			list_del(&mac_node->node);
1467 			kfree(mac_node);
1468 		}
1469 	}
1470 }
1471 
1472 static void hclgevf_sync_from_del_list(struct list_head *del_list,
1473 				       struct list_head *mac_list)
1474 {
1475 	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
1476 
1477 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
1478 		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
1479 		if (new_node) {
1480 			/* If the mac addr is exist in the mac list, it means
1481 			 * received a new request TO_ADD during the time window
1482 			 * of sending mac addr configurrequest to PF, so just
1483 			 * change the mac state to ACTIVE.
1484 			 */
1485 			new_node->state = HCLGEVF_MAC_ACTIVE;
1486 			list_del(&mac_node->node);
1487 			kfree(mac_node);
1488 		} else {
1489 			list_del(&mac_node->node);
1490 			list_add_tail(&mac_node->node, mac_list);
1491 		}
1492 	}
1493 }
1494 
1495 static void hclgevf_clear_list(struct list_head *list)
1496 {
1497 	struct hclgevf_mac_addr_node *mac_node, *tmp;
1498 
1499 	list_for_each_entry_safe(mac_node, tmp, list, node) {
1500 		list_del(&mac_node->node);
1501 		kfree(mac_node);
1502 	}
1503 }
1504 
1505 static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
1506 				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
1507 {
1508 	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
1509 	struct list_head tmp_add_list, tmp_del_list;
1510 	struct list_head *list;
1511 
1512 	INIT_LIST_HEAD(&tmp_add_list);
1513 	INIT_LIST_HEAD(&tmp_del_list);
1514 
1515 	/* move the mac addr to the tmp_add_list and tmp_del_list, then
1516 	 * we can add/delete these mac addr outside the spin lock
1517 	 */
1518 	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
1519 		&hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
1520 
1521 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1522 
1523 	list_for_each_entry_safe(mac_node, tmp, list, node) {
1524 		switch (mac_node->state) {
1525 		case HCLGEVF_MAC_TO_DEL:
1526 			list_del(&mac_node->node);
1527 			list_add_tail(&mac_node->node, &tmp_del_list);
1528 			break;
1529 		case HCLGEVF_MAC_TO_ADD:
1530 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
1531 			if (!new_node)
1532 				goto stop_traverse;
1533 
1534 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
1535 			new_node->state = mac_node->state;
1536 			list_add_tail(&new_node->node, &tmp_add_list);
1537 			break;
1538 		default:
1539 			break;
1540 		}
1541 	}
1542 
1543 stop_traverse:
1544 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1545 
1546 	/* delete first, in order to get max mac table space for adding */
1547 	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
1548 	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
1549 
1550 	/* if some mac addresses were added/deleted fail, move back to the
1551 	 * mac_list, and retry at next time.
1552 	 */
1553 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1554 
1555 	hclgevf_sync_from_del_list(&tmp_del_list, list);
1556 	hclgevf_sync_from_add_list(&tmp_add_list, list);
1557 
1558 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1559 }
1560 
1561 static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
1562 {
1563 	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
1564 	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
1565 }
1566 
1567 static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
1568 {
1569 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1570 
1571 	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
1572 	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
1573 
1574 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1575 }
1576 
1577 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
1578 				   __be16 proto, u16 vlan_id,
1579 				   bool is_kill)
1580 {
1581 #define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
1582 #define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
1583 #define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3
1584 
1585 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1586 	struct hclge_vf_to_pf_msg send_msg;
1587 	int ret;
1588 
1589 	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
1590 		return -EINVAL;
1591 
1592 	if (proto != htons(ETH_P_8021Q))
1593 		return -EPROTONOSUPPORT;
1594 
1595 	/* When device is resetting or reset failed, firmware is unable to
1596 	 * handle mailbox. Just record the vlan id, and remove it after
1597 	 * reset finished.
1598 	 */
1599 	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
1600 	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
1601 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
1602 		return -EBUSY;
1603 	}
1604 
1605 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
1606 			       HCLGE_MBX_VLAN_FILTER);
1607 	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
1608 	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
1609 	       sizeof(vlan_id));
1610 	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
1611 	       sizeof(proto));
1612 	/* when remove hw vlan filter failed, record the vlan id,
1613 	 * and try to remove it from hw later, to be consistence
1614 	 * with stack.
1615 	 */
1616 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1617 	if (is_kill && ret)
1618 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
1619 
1620 	return ret;
1621 }
1622 
1623 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
1624 {
1625 #define HCLGEVF_MAX_SYNC_COUNT	60
1626 	struct hnae3_handle *handle = &hdev->nic;
1627 	int ret, sync_cnt = 0;
1628 	u16 vlan_id;
1629 
1630 	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
1631 	while (vlan_id != VLAN_N_VID) {
1632 		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
1633 					      vlan_id, true);
1634 		if (ret)
1635 			return;
1636 
1637 		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
1638 		sync_cnt++;
1639 		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
1640 			return;
1641 
1642 		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
1643 	}
1644 }
1645 
1646 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
1647 {
1648 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1649 	struct hclge_vf_to_pf_msg send_msg;
1650 
1651 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
1652 			       HCLGE_MBX_VLAN_RX_OFF_CFG);
1653 	send_msg.data[0] = enable ? 1 : 0;
1654 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1655 }
1656 
1657 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
1658 {
1659 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1660 	struct hclge_vf_to_pf_msg send_msg;
1661 	int ret;
1662 
1663 	/* disable vf queue before send queue reset msg to PF */
1664 	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
1665 	if (ret)
1666 		return ret;
1667 
1668 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
1669 	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
1670 	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1671 }
1672 
1673 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
1674 {
1675 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1676 	struct hclge_vf_to_pf_msg send_msg;
1677 
1678 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
1679 	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
1680 	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1681 }
1682 
1683 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
1684 				 enum hnae3_reset_notify_type type)
1685 {
1686 	struct hnae3_client *client = hdev->nic_client;
1687 	struct hnae3_handle *handle = &hdev->nic;
1688 	int ret;
1689 
1690 	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
1691 	    !client)
1692 		return 0;
1693 
1694 	if (!client->ops->reset_notify)
1695 		return -EOPNOTSUPP;
1696 
1697 	ret = client->ops->reset_notify(handle, type);
1698 	if (ret)
1699 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
1700 			type, ret);
1701 
1702 	return ret;
1703 }
1704 
1705 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
1706 {
1707 #define HCLGEVF_RESET_WAIT_US	20000
1708 #define HCLGEVF_RESET_WAIT_CNT	2000
1709 #define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
1710 	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
1711 
1712 	u32 val;
1713 	int ret;
1714 
1715 	if (hdev->reset_type == HNAE3_VF_RESET)
1716 		ret = readl_poll_timeout(hdev->hw.io_base +
1717 					 HCLGEVF_VF_RST_ING, val,
1718 					 !(val & HCLGEVF_VF_RST_ING_BIT),
1719 					 HCLGEVF_RESET_WAIT_US,
1720 					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
1721 	else
1722 		ret = readl_poll_timeout(hdev->hw.io_base +
1723 					 HCLGEVF_RST_ING, val,
1724 					 !(val & HCLGEVF_RST_ING_BITS),
1725 					 HCLGEVF_RESET_WAIT_US,
1726 					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
1727 
1728 	/* hardware completion status should be available by this time */
1729 	if (ret) {
1730 		dev_err(&hdev->pdev->dev,
1731 			"couldn't get reset done status from h/w, timeout!\n");
1732 		return ret;
1733 	}
1734 
1735 	/* we will wait a bit more to let reset of the stack to complete. This
1736 	 * might happen in case reset assertion was made by PF. Yes, this also
1737 	 * means we might end up waiting bit more even for VF reset.
1738 	 */
1739 	msleep(5000);
1740 
1741 	return 0;
1742 }
1743 
1744 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
1745 {
1746 	u32 reg_val;
1747 
1748 	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
1749 	if (enable)
1750 		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
1751 	else
1752 		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
1753 
1754 	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
1755 			  reg_val);
1756 }
1757 
1758 static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
1759 {
1760 	int ret;
1761 
1762 	/* uninitialize the nic client */
1763 	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1764 	if (ret)
1765 		return ret;
1766 
1767 	/* re-initialize the hclge device */
1768 	ret = hclgevf_reset_hdev(hdev);
1769 	if (ret) {
1770 		dev_err(&hdev->pdev->dev,
1771 			"hclge device re-init failed, VF is disabled!\n");
1772 		return ret;
1773 	}
1774 
1775 	/* bring up the nic client again */
1776 	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1777 	if (ret)
1778 		return ret;
1779 
1780 	/* clear handshake status with IMP */
1781 	hclgevf_reset_handshake(hdev, false);
1782 
1783 	/* bring up the nic to enable TX/RX again */
1784 	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1785 }
1786 
1787 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
1788 {
1789 #define HCLGEVF_RESET_SYNC_TIME 100
1790 
1791 	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
1792 		struct hclge_vf_to_pf_msg send_msg;
1793 		int ret;
1794 
1795 		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
1796 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1797 		if (ret) {
1798 			dev_err(&hdev->pdev->dev,
1799 				"failed to assert VF reset, ret = %d\n", ret);
1800 			return ret;
1801 		}
1802 		hdev->rst_stats.vf_func_rst_cnt++;
1803 	}
1804 
1805 	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
1806 	/* inform hardware that preparatory work is done */
1807 	msleep(HCLGEVF_RESET_SYNC_TIME);
1808 	hclgevf_reset_handshake(hdev, true);
1809 	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
1810 		 hdev->reset_type);
1811 
1812 	return 0;
1813 }
1814 
1815 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
1816 {
1817 	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
1818 		 hdev->rst_stats.vf_func_rst_cnt);
1819 	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
1820 		 hdev->rst_stats.flr_rst_cnt);
1821 	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
1822 		 hdev->rst_stats.vf_rst_cnt);
1823 	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
1824 		 hdev->rst_stats.rst_done_cnt);
1825 	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
1826 		 hdev->rst_stats.hw_rst_done_cnt);
1827 	dev_info(&hdev->pdev->dev, "reset count: %u\n",
1828 		 hdev->rst_stats.rst_cnt);
1829 	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
1830 		 hdev->rst_stats.rst_fail_cnt);
1831 	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
1832 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
1833 	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
1834 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
1835 	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
1836 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
1837 	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
1838 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
1839 	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
1840 }
1841 
1842 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
1843 {
1844 	/* recover handshake status with IMP when reset fail */
1845 	hclgevf_reset_handshake(hdev, true);
1846 	hdev->rst_stats.rst_fail_cnt++;
1847 	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
1848 		hdev->rst_stats.rst_fail_cnt);
1849 
1850 	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
1851 		set_bit(hdev->reset_type, &hdev->reset_pending);
1852 
1853 	if (hclgevf_is_reset_pending(hdev)) {
1854 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1855 		hclgevf_reset_task_schedule(hdev);
1856 	} else {
1857 		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1858 		hclgevf_dump_rst_info(hdev);
1859 	}
1860 }
1861 
1862 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
1863 {
1864 	int ret;
1865 
1866 	hdev->rst_stats.rst_cnt++;
1867 
1868 	rtnl_lock();
1869 	/* bring down the nic to stop any ongoing TX/RX */
1870 	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
1871 	rtnl_unlock();
1872 	if (ret)
1873 		return ret;
1874 
1875 	return hclgevf_reset_prepare_wait(hdev);
1876 }
1877 
1878 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
1879 {
1880 	int ret;
1881 
1882 	hdev->rst_stats.hw_rst_done_cnt++;
1883 
1884 	rtnl_lock();
1885 	/* now, re-initialize the nic client and ae device */
1886 	ret = hclgevf_reset_stack(hdev);
1887 	rtnl_unlock();
1888 	if (ret) {
1889 		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1890 		return ret;
1891 	}
1892 
1893 	hdev->last_reset_time = jiffies;
1894 	hdev->rst_stats.rst_done_cnt++;
1895 	hdev->rst_stats.rst_fail_cnt = 0;
1896 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1897 
1898 	return 0;
1899 }
1900 
1901 static void hclgevf_reset(struct hclgevf_dev *hdev)
1902 {
1903 	if (hclgevf_reset_prepare(hdev))
1904 		goto err_reset;
1905 
1906 	/* check if VF could successfully fetch the hardware reset completion
1907 	 * status from the hardware
1908 	 */
1909 	if (hclgevf_reset_wait(hdev)) {
1910 		/* can't do much in this situation, will disable VF */
1911 		dev_err(&hdev->pdev->dev,
1912 			"failed to fetch H/W reset completion status\n");
1913 		goto err_reset;
1914 	}
1915 
1916 	if (hclgevf_reset_rebuild(hdev))
1917 		goto err_reset;
1918 
1919 	return;
1920 
1921 err_reset:
1922 	hclgevf_reset_err_handle(hdev);
1923 }
1924 
1925 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
1926 						     unsigned long *addr)
1927 {
1928 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
1929 
1930 	/* return the highest priority reset level amongst all */
1931 	if (test_bit(HNAE3_VF_RESET, addr)) {
1932 		rst_level = HNAE3_VF_RESET;
1933 		clear_bit(HNAE3_VF_RESET, addr);
1934 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1935 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1936 	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
1937 		rst_level = HNAE3_VF_FULL_RESET;
1938 		clear_bit(HNAE3_VF_FULL_RESET, addr);
1939 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1940 	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
1941 		rst_level = HNAE3_VF_PF_FUNC_RESET;
1942 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1943 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1944 	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
1945 		rst_level = HNAE3_VF_FUNC_RESET;
1946 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1947 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
1948 		rst_level = HNAE3_FLR_RESET;
1949 		clear_bit(HNAE3_FLR_RESET, addr);
1950 	}
1951 
1952 	return rst_level;
1953 }
1954 
1955 static void hclgevf_reset_event(struct pci_dev *pdev,
1956 				struct hnae3_handle *handle)
1957 {
1958 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1959 	struct hclgevf_dev *hdev = ae_dev->priv;
1960 
1961 	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
1962 
1963 	if (hdev->default_reset_request)
1964 		hdev->reset_level =
1965 			hclgevf_get_reset_level(hdev,
1966 						&hdev->default_reset_request);
1967 	else
1968 		hdev->reset_level = HNAE3_VF_FUNC_RESET;
1969 
1970 	/* reset of this VF requested */
1971 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
1972 	hclgevf_reset_task_schedule(hdev);
1973 
1974 	hdev->last_reset_time = jiffies;
1975 }
1976 
1977 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
1978 					  enum hnae3_reset_type rst_type)
1979 {
1980 	struct hclgevf_dev *hdev = ae_dev->priv;
1981 
1982 	set_bit(rst_type, &hdev->default_reset_request);
1983 }
1984 
1985 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1986 {
1987 	writel(en ? 1 : 0, vector->addr);
1988 }
1989 
1990 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
1991 {
1992 #define HCLGEVF_FLR_RETRY_WAIT_MS	500
1993 #define HCLGEVF_FLR_RETRY_CNT		5
1994 
1995 	struct hclgevf_dev *hdev = ae_dev->priv;
1996 	int retry_cnt = 0;
1997 	int ret;
1998 
1999 retry:
2000 	down(&hdev->reset_sem);
2001 	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2002 	hdev->reset_type = HNAE3_FLR_RESET;
2003 	ret = hclgevf_reset_prepare(hdev);
2004 	if (ret) {
2005 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
2006 			ret);
2007 		if (hdev->reset_pending ||
2008 		    retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
2009 			dev_err(&hdev->pdev->dev,
2010 				"reset_pending:0x%lx, retry_cnt:%d\n",
2011 				hdev->reset_pending, retry_cnt);
2012 			clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2013 			up(&hdev->reset_sem);
2014 			msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
2015 			goto retry;
2016 		}
2017 	}
2018 
2019 	/* disable misc vector before FLR done */
2020 	hclgevf_enable_vector(&hdev->misc_vector, false);
2021 	hdev->rst_stats.flr_rst_cnt++;
2022 }
2023 
2024 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
2025 {
2026 	struct hclgevf_dev *hdev = ae_dev->priv;
2027 	int ret;
2028 
2029 	hclgevf_enable_vector(&hdev->misc_vector, true);
2030 
2031 	ret = hclgevf_reset_rebuild(hdev);
2032 	if (ret)
2033 		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
2034 			 ret);
2035 
2036 	hdev->reset_type = HNAE3_NONE_RESET;
2037 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2038 	up(&hdev->reset_sem);
2039 }
2040 
2041 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
2042 {
2043 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2044 
2045 	return hdev->fw_version;
2046 }
2047 
2048 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
2049 {
2050 	struct hclgevf_misc_vector *vector = &hdev->misc_vector;
2051 
2052 	vector->vector_irq = pci_irq_vector(hdev->pdev,
2053 					    HCLGEVF_MISC_VECTOR_NUM);
2054 	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
2055 	/* vector status always valid for Vector 0 */
2056 	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
2057 	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
2058 
2059 	hdev->num_msi_left -= 1;
2060 	hdev->num_msi_used += 1;
2061 }
2062 
2063 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
2064 {
2065 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2066 	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
2067 			      &hdev->state))
2068 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
2069 }
2070 
2071 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
2072 {
2073 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2074 	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
2075 			      &hdev->state))
2076 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
2077 }
2078 
2079 static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
2080 				  unsigned long delay)
2081 {
2082 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
2083 	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
2084 		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
2085 }
2086 
2087 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
2088 {
2089 #define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3
2090 
2091 	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
2092 		return;
2093 
2094 	down(&hdev->reset_sem);
2095 	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2096 
2097 	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
2098 			       &hdev->reset_state)) {
2099 		/* PF has initmated that it is about to reset the hardware.
2100 		 * We now have to poll & check if hardware has actually
2101 		 * completed the reset sequence. On hardware reset completion,
2102 		 * VF needs to reset the client and ae device.
2103 		 */
2104 		hdev->reset_attempts = 0;
2105 
2106 		hdev->last_reset_time = jiffies;
2107 		while ((hdev->reset_type =
2108 			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
2109 		       != HNAE3_NONE_RESET)
2110 			hclgevf_reset(hdev);
2111 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
2112 				      &hdev->reset_state)) {
2113 		/* we could be here when either of below happens:
2114 		 * 1. reset was initiated due to watchdog timeout caused by
2115 		 *    a. IMP was earlier reset and our TX got choked down and
2116 		 *       which resulted in watchdog reacting and inducing VF
2117 		 *       reset. This also means our cmdq would be unreliable.
2118 		 *    b. problem in TX due to other lower layer(example link
2119 		 *       layer not functioning properly etc.)
2120 		 * 2. VF reset might have been initiated due to some config
2121 		 *    change.
2122 		 *
2123 		 * NOTE: Theres no clear way to detect above cases than to react
2124 		 * to the response of PF for this reset request. PF will ack the
2125 		 * 1b and 2. cases but we will not get any intimation about 1a
2126 		 * from PF as cmdq would be in unreliable state i.e. mailbox
2127 		 * communication between PF and VF would be broken.
2128 		 *
2129 		 * if we are never geting into pending state it means either:
2130 		 * 1. PF is not receiving our request which could be due to IMP
2131 		 *    reset
2132 		 * 2. PF is screwed
2133 		 * We cannot do much for 2. but to check first we can try reset
2134 		 * our PCIe + stack and see if it alleviates the problem.
2135 		 */
2136 		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
2137 			/* prepare for full reset of stack + pcie interface */
2138 			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
2139 
2140 			/* "defer" schedule the reset task again */
2141 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2142 		} else {
2143 			hdev->reset_attempts++;
2144 
2145 			set_bit(hdev->reset_level, &hdev->reset_pending);
2146 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2147 		}
2148 		hclgevf_reset_task_schedule(hdev);
2149 	}
2150 
2151 	hdev->reset_type = HNAE3_NONE_RESET;
2152 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2153 	up(&hdev->reset_sem);
2154 }
2155 
2156 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
2157 {
2158 	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
2159 		return;
2160 
2161 	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
2162 		return;
2163 
2164 	hclgevf_mbx_async_handler(hdev);
2165 
2166 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2167 }
2168 
2169 static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
2170 {
2171 	struct hclge_vf_to_pf_msg send_msg;
2172 	int ret;
2173 
2174 	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
2175 		return;
2176 
2177 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
2178 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2179 	if (ret)
2180 		dev_err(&hdev->pdev->dev,
2181 			"VF sends keep alive cmd failed(=%d)\n", ret);
2182 }
2183 
2184 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
2185 {
2186 	unsigned long delta = round_jiffies_relative(HZ);
2187 	struct hnae3_handle *handle = &hdev->nic;
2188 
2189 	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
2190 		return;
2191 
2192 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
2193 		delta = jiffies - hdev->last_serv_processed;
2194 
2195 		if (delta < round_jiffies_relative(HZ)) {
2196 			delta = round_jiffies_relative(HZ) - delta;
2197 			goto out;
2198 		}
2199 	}
2200 
2201 	hdev->serv_processed_cnt++;
2202 	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
2203 		hclgevf_keep_alive(hdev);
2204 
2205 	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
2206 		hdev->last_serv_processed = jiffies;
2207 		goto out;
2208 	}
2209 
2210 	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
2211 		hclgevf_tqps_update_stats(handle);
2212 
2213 	/* request the link status from the PF. PF would be able to tell VF
2214 	 * about such updates in future so we might remove this later
2215 	 */
2216 	hclgevf_request_link_info(hdev);
2217 
2218 	hclgevf_update_link_mode(hdev);
2219 
2220 	hclgevf_sync_vlan_filter(hdev);
2221 
2222 	hclgevf_sync_mac_table(hdev);
2223 
2224 	hclgevf_sync_promisc_mode(hdev);
2225 
2226 	hdev->last_serv_processed = jiffies;
2227 
2228 out:
2229 	hclgevf_task_schedule(hdev, delta);
2230 }
2231 
2232 static void hclgevf_service_task(struct work_struct *work)
2233 {
2234 	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
2235 						service_task.work);
2236 
2237 	hclgevf_reset_service_task(hdev);
2238 	hclgevf_mailbox_service_task(hdev);
2239 	hclgevf_periodic_service_task(hdev);
2240 
2241 	/* Handle reset and mbx again in case periodical task delays the
2242 	 * handling by calling hclgevf_task_schedule() in
2243 	 * hclgevf_periodic_service_task()
2244 	 */
2245 	hclgevf_reset_service_task(hdev);
2246 	hclgevf_mailbox_service_task(hdev);
2247 }
2248 
2249 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
2250 {
2251 	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
2252 }
2253 
2254 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
2255 						      u32 *clearval)
2256 {
2257 	u32 val, cmdq_stat_reg, rst_ing_reg;
2258 
2259 	/* fetch the events from their corresponding regs */
2260 	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
2261 					 HCLGEVF_VECTOR0_CMDQ_STATE_REG);
2262 
2263 	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
2264 		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
2265 		dev_info(&hdev->pdev->dev,
2266 			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
2267 		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
2268 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2269 		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
2270 		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
2271 		hdev->rst_stats.vf_rst_cnt++;
2272 		/* set up VF hardware reset status, its PF will clear
2273 		 * this status when PF has initialized done.
2274 		 */
2275 		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
2276 		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
2277 				  val | HCLGEVF_VF_RST_ING_BIT);
2278 		return HCLGEVF_VECTOR0_EVENT_RST;
2279 	}
2280 
2281 	/* check for vector0 mailbox(=CMDQ RX) event source */
2282 	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
2283 		/* for revision 0x21, clearing interrupt is writing bit 0
2284 		 * to the clear register, writing bit 1 means to keep the
2285 		 * old value.
2286 		 * for revision 0x20, the clear register is a read & write
2287 		 * register, so we should just write 0 to the bit we are
2288 		 * handling, and keep other bits as cmdq_stat_reg.
2289 		 */
2290 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2291 			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2292 		else
2293 			*clearval = cmdq_stat_reg &
2294 				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2295 
2296 		return HCLGEVF_VECTOR0_EVENT_MBX;
2297 	}
2298 
2299 	/* print other vector0 event source */
2300 	dev_info(&hdev->pdev->dev,
2301 		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
2302 		 cmdq_stat_reg);
2303 
2304 	return HCLGEVF_VECTOR0_EVENT_OTHER;
2305 }
2306 
2307 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
2308 {
2309 	enum hclgevf_evt_cause event_cause;
2310 	struct hclgevf_dev *hdev = data;
2311 	u32 clearval;
2312 
2313 	hclgevf_enable_vector(&hdev->misc_vector, false);
2314 	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
2315 
2316 	switch (event_cause) {
2317 	case HCLGEVF_VECTOR0_EVENT_RST:
2318 		hclgevf_reset_task_schedule(hdev);
2319 		break;
2320 	case HCLGEVF_VECTOR0_EVENT_MBX:
2321 		hclgevf_mbx_handler(hdev);
2322 		break;
2323 	default:
2324 		break;
2325 	}
2326 
2327 	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
2328 		hclgevf_clear_event_cause(hdev, clearval);
2329 		hclgevf_enable_vector(&hdev->misc_vector, true);
2330 	}
2331 
2332 	return IRQ_HANDLED;
2333 }
2334 
2335 static int hclgevf_configure(struct hclgevf_dev *hdev)
2336 {
2337 	int ret;
2338 
2339 	/* get current port based vlan state from PF */
2340 	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
2341 	if (ret)
2342 		return ret;
2343 
2344 	/* get queue configuration from PF */
2345 	ret = hclgevf_get_queue_info(hdev);
2346 	if (ret)
2347 		return ret;
2348 
2349 	/* get queue depth info from PF */
2350 	ret = hclgevf_get_queue_depth(hdev);
2351 	if (ret)
2352 		return ret;
2353 
2354 	ret = hclgevf_get_pf_media_type(hdev);
2355 	if (ret)
2356 		return ret;
2357 
2358 	/* get tc configuration from PF */
2359 	return hclgevf_get_tc_info(hdev);
2360 }
2361 
2362 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
2363 {
2364 	struct pci_dev *pdev = ae_dev->pdev;
2365 	struct hclgevf_dev *hdev;
2366 
2367 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
2368 	if (!hdev)
2369 		return -ENOMEM;
2370 
2371 	hdev->pdev = pdev;
2372 	hdev->ae_dev = ae_dev;
2373 	ae_dev->priv = hdev;
2374 
2375 	return 0;
2376 }
2377 
2378 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
2379 {
2380 	struct hnae3_handle *roce = &hdev->roce;
2381 	struct hnae3_handle *nic = &hdev->nic;
2382 
2383 	roce->rinfo.num_vectors = hdev->num_roce_msix;
2384 
2385 	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
2386 	    hdev->num_msi_left == 0)
2387 		return -EINVAL;
2388 
2389 	roce->rinfo.base_vector = hdev->roce_base_vector;
2390 
2391 	roce->rinfo.netdev = nic->kinfo.netdev;
2392 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2393 
2394 	roce->pdev = nic->pdev;
2395 	roce->ae_algo = nic->ae_algo;
2396 	roce->numa_node_mask = nic->numa_node_mask;
2397 
2398 	return 0;
2399 }
2400 
2401 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
2402 {
2403 	struct hclgevf_cfg_gro_status_cmd *req;
2404 	struct hclgevf_desc desc;
2405 	int ret;
2406 
2407 	if (!hnae3_dev_gro_supported(hdev))
2408 		return 0;
2409 
2410 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
2411 				     false);
2412 	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
2413 
2414 	req->gro_en = en ? 1 : 0;
2415 
2416 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2417 	if (ret)
2418 		dev_err(&hdev->pdev->dev,
2419 			"VF GRO hardware config cmd failed, ret = %d.\n", ret);
2420 
2421 	return ret;
2422 }
2423 
2424 static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
2425 {
2426 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
2427 	struct hclgevf_rss_tuple_cfg *tuple_sets;
2428 	u32 i;
2429 
2430 	rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
2431 	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
2432 	tuple_sets = &rss_cfg->rss_tuple_sets;
2433 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2434 		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
2435 		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
2436 		       HCLGEVF_RSS_KEY_SIZE);
2437 
2438 		tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2439 		tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2440 		tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
2441 		tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2442 		tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2443 		tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2444 		tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
2445 		tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
2446 	}
2447 
2448 	/* Initialize RSS indirect table */
2449 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
2450 		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
2451 }
2452 
2453 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
2454 {
2455 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
2456 	int ret;
2457 
2458 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2459 		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
2460 					       rss_cfg->rss_hash_key);
2461 		if (ret)
2462 			return ret;
2463 
2464 		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
2465 		if (ret)
2466 			return ret;
2467 	}
2468 
2469 	ret = hclgevf_set_rss_indir_table(hdev);
2470 	if (ret)
2471 		return ret;
2472 
2473 	return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
2474 }
2475 
2476 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
2477 {
2478 	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
2479 				       false);
2480 }
2481 
2482 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
2483 {
2484 #define HCLGEVF_FLUSH_LINK_TIMEOUT	100000
2485 
2486 	unsigned long last = hdev->serv_processed_cnt;
2487 	int i = 0;
2488 
2489 	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
2490 	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
2491 	       last == hdev->serv_processed_cnt)
2492 		usleep_range(1, 1);
2493 }
2494 
2495 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
2496 {
2497 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2498 
2499 	if (enable) {
2500 		hclgevf_task_schedule(hdev, 0);
2501 	} else {
2502 		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2503 
2504 		/* flush memory to make sure DOWN is seen by service task */
2505 		smp_mb__before_atomic();
2506 		hclgevf_flush_link_update(hdev);
2507 	}
2508 }
2509 
2510 static int hclgevf_ae_start(struct hnae3_handle *handle)
2511 {
2512 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2513 
2514 	hclgevf_reset_tqp_stats(handle);
2515 
2516 	hclgevf_request_link_info(hdev);
2517 
2518 	hclgevf_update_link_mode(hdev);
2519 
2520 	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2521 
2522 	return 0;
2523 }
2524 
2525 static void hclgevf_ae_stop(struct hnae3_handle *handle)
2526 {
2527 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2528 	int i;
2529 
2530 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2531 
2532 	if (hdev->reset_type != HNAE3_VF_RESET)
2533 		for (i = 0; i < handle->kinfo.num_tqps; i++)
2534 			if (hclgevf_reset_tqp(handle, i))
2535 				break;
2536 
2537 	hclgevf_reset_tqp_stats(handle);
2538 	hclgevf_update_link_status(hdev, 0);
2539 }
2540 
2541 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
2542 {
2543 #define HCLGEVF_STATE_ALIVE	1
2544 #define HCLGEVF_STATE_NOT_ALIVE	0
2545 
2546 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2547 	struct hclge_vf_to_pf_msg send_msg;
2548 
2549 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
2550 	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
2551 				HCLGEVF_STATE_NOT_ALIVE;
2552 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2553 }
2554 
2555 static int hclgevf_client_start(struct hnae3_handle *handle)
2556 {
2557 	return hclgevf_set_alive(handle, true);
2558 }
2559 
2560 static void hclgevf_client_stop(struct hnae3_handle *handle)
2561 {
2562 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2563 	int ret;
2564 
2565 	ret = hclgevf_set_alive(handle, false);
2566 	if (ret)
2567 		dev_warn(&hdev->pdev->dev,
2568 			 "%s failed %d\n", __func__, ret);
2569 }
2570 
2571 static void hclgevf_state_init(struct hclgevf_dev *hdev)
2572 {
2573 	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
2574 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2575 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
2576 
2577 	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
2578 
2579 	mutex_init(&hdev->mbx_resp.mbx_mutex);
2580 	sema_init(&hdev->reset_sem, 1);
2581 
2582 	spin_lock_init(&hdev->mac_table.mac_list_lock);
2583 	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
2584 	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
2585 
2586 	/* bring the device down */
2587 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2588 }
2589 
2590 static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
2591 {
2592 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2593 	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
2594 
2595 	if (hdev->service_task.work.func)
2596 		cancel_delayed_work_sync(&hdev->service_task);
2597 
2598 	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
2599 }
2600 
2601 static int hclgevf_init_msi(struct hclgevf_dev *hdev)
2602 {
2603 	struct pci_dev *pdev = hdev->pdev;
2604 	int vectors;
2605 	int i;
2606 
2607 	if (hnae3_dev_roce_supported(hdev))
2608 		vectors = pci_alloc_irq_vectors(pdev,
2609 						hdev->roce_base_msix_offset + 1,
2610 						hdev->num_msi,
2611 						PCI_IRQ_MSIX);
2612 	else
2613 		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2614 						hdev->num_msi,
2615 						PCI_IRQ_MSI | PCI_IRQ_MSIX);
2616 
2617 	if (vectors < 0) {
2618 		dev_err(&pdev->dev,
2619 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2620 			vectors);
2621 		return vectors;
2622 	}
2623 	if (vectors < hdev->num_msi)
2624 		dev_warn(&hdev->pdev->dev,
2625 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2626 			 hdev->num_msi, vectors);
2627 
2628 	hdev->num_msi = vectors;
2629 	hdev->num_msi_left = vectors;
2630 
2631 	hdev->base_msi_vector = pdev->irq;
2632 	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
2633 
2634 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2635 					   sizeof(u16), GFP_KERNEL);
2636 	if (!hdev->vector_status) {
2637 		pci_free_irq_vectors(pdev);
2638 		return -ENOMEM;
2639 	}
2640 
2641 	for (i = 0; i < hdev->num_msi; i++)
2642 		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
2643 
2644 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2645 					sizeof(int), GFP_KERNEL);
2646 	if (!hdev->vector_irq) {
2647 		devm_kfree(&pdev->dev, hdev->vector_status);
2648 		pci_free_irq_vectors(pdev);
2649 		return -ENOMEM;
2650 	}
2651 
2652 	return 0;
2653 }
2654 
2655 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
2656 {
2657 	struct pci_dev *pdev = hdev->pdev;
2658 
2659 	devm_kfree(&pdev->dev, hdev->vector_status);
2660 	devm_kfree(&pdev->dev, hdev->vector_irq);
2661 	pci_free_irq_vectors(pdev);
2662 }
2663 
2664 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
2665 {
2666 	int ret;
2667 
2668 	hclgevf_get_misc_vector(hdev);
2669 
2670 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
2671 		 HCLGEVF_NAME, pci_name(hdev->pdev));
2672 	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
2673 			  0, hdev->misc_vector.name, hdev);
2674 	if (ret) {
2675 		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
2676 			hdev->misc_vector.vector_irq);
2677 		return ret;
2678 	}
2679 
2680 	hclgevf_clear_event_cause(hdev, 0);
2681 
2682 	/* enable misc. vector(vector 0) */
2683 	hclgevf_enable_vector(&hdev->misc_vector, true);
2684 
2685 	return ret;
2686 }
2687 
2688 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
2689 {
2690 	/* disable misc vector(vector 0) */
2691 	hclgevf_enable_vector(&hdev->misc_vector, false);
2692 	synchronize_irq(hdev->misc_vector.vector_irq);
2693 	free_irq(hdev->misc_vector.vector_irq, hdev);
2694 	hclgevf_free_vector(hdev, 0);
2695 }
2696 
2697 static void hclgevf_info_show(struct hclgevf_dev *hdev)
2698 {
2699 	struct device *dev = &hdev->pdev->dev;
2700 
2701 	dev_info(dev, "VF info begin:\n");
2702 
2703 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
2704 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
2705 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
2706 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
2707 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
2708 	dev_info(dev, "PF media type of this VF: %u\n",
2709 		 hdev->hw.mac.media_type);
2710 
2711 	dev_info(dev, "VF info end.\n");
2712 }
2713 
2714 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
2715 					    struct hnae3_client *client)
2716 {
2717 	struct hclgevf_dev *hdev = ae_dev->priv;
2718 	int rst_cnt = hdev->rst_stats.rst_cnt;
2719 	int ret;
2720 
2721 	ret = client->ops->init_instance(&hdev->nic);
2722 	if (ret)
2723 		return ret;
2724 
2725 	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2726 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
2727 	    rst_cnt != hdev->rst_stats.rst_cnt) {
2728 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2729 
2730 		client->ops->uninit_instance(&hdev->nic, 0);
2731 		return -EBUSY;
2732 	}
2733 
2734 	hnae3_set_client_init_flag(client, ae_dev, 1);
2735 
2736 	if (netif_msg_drv(&hdev->nic))
2737 		hclgevf_info_show(hdev);
2738 
2739 	return 0;
2740 }
2741 
2742 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
2743 					     struct hnae3_client *client)
2744 {
2745 	struct hclgevf_dev *hdev = ae_dev->priv;
2746 	int ret;
2747 
2748 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
2749 	    !hdev->nic_client)
2750 		return 0;
2751 
2752 	ret = hclgevf_init_roce_base_info(hdev);
2753 	if (ret)
2754 		return ret;
2755 
2756 	ret = client->ops->init_instance(&hdev->roce);
2757 	if (ret)
2758 		return ret;
2759 
2760 	hnae3_set_client_init_flag(client, ae_dev, 1);
2761 
2762 	return 0;
2763 }
2764 
2765 static int hclgevf_init_client_instance(struct hnae3_client *client,
2766 					struct hnae3_ae_dev *ae_dev)
2767 {
2768 	struct hclgevf_dev *hdev = ae_dev->priv;
2769 	int ret;
2770 
2771 	switch (client->type) {
2772 	case HNAE3_CLIENT_KNIC:
2773 		hdev->nic_client = client;
2774 		hdev->nic.client = client;
2775 
2776 		ret = hclgevf_init_nic_client_instance(ae_dev, client);
2777 		if (ret)
2778 			goto clear_nic;
2779 
2780 		ret = hclgevf_init_roce_client_instance(ae_dev,
2781 							hdev->roce_client);
2782 		if (ret)
2783 			goto clear_roce;
2784 
2785 		break;
2786 	case HNAE3_CLIENT_ROCE:
2787 		if (hnae3_dev_roce_supported(hdev)) {
2788 			hdev->roce_client = client;
2789 			hdev->roce.client = client;
2790 		}
2791 
2792 		ret = hclgevf_init_roce_client_instance(ae_dev, client);
2793 		if (ret)
2794 			goto clear_roce;
2795 
2796 		break;
2797 	default:
2798 		return -EINVAL;
2799 	}
2800 
2801 	return 0;
2802 
2803 clear_nic:
2804 	hdev->nic_client = NULL;
2805 	hdev->nic.client = NULL;
2806 	return ret;
2807 clear_roce:
2808 	hdev->roce_client = NULL;
2809 	hdev->roce.client = NULL;
2810 	return ret;
2811 }
2812 
2813 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
2814 					   struct hnae3_ae_dev *ae_dev)
2815 {
2816 	struct hclgevf_dev *hdev = ae_dev->priv;
2817 
2818 	/* un-init roce, if it exists */
2819 	if (hdev->roce_client) {
2820 		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
2821 		hdev->roce_client = NULL;
2822 		hdev->roce.client = NULL;
2823 	}
2824 
2825 	/* un-init nic/unic, if this was not called by roce client */
2826 	if (client->ops->uninit_instance && hdev->nic_client &&
2827 	    client->type != HNAE3_CLIENT_ROCE) {
2828 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2829 
2830 		client->ops->uninit_instance(&hdev->nic, 0);
2831 		hdev->nic_client = NULL;
2832 		hdev->nic.client = NULL;
2833 	}
2834 }
2835 
2836 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
2837 {
2838 	struct pci_dev *pdev = hdev->pdev;
2839 	struct hclgevf_hw *hw;
2840 	int ret;
2841 
2842 	ret = pci_enable_device(pdev);
2843 	if (ret) {
2844 		dev_err(&pdev->dev, "failed to enable PCI device\n");
2845 		return ret;
2846 	}
2847 
2848 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2849 	if (ret) {
2850 		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
2851 		goto err_disable_device;
2852 	}
2853 
2854 	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
2855 	if (ret) {
2856 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
2857 		goto err_disable_device;
2858 	}
2859 
2860 	pci_set_master(pdev);
2861 	hw = &hdev->hw;
2862 	hw->hdev = hdev;
2863 	hw->io_base = pci_iomap(pdev, 2, 0);
2864 	if (!hw->io_base) {
2865 		dev_err(&pdev->dev, "can't map configuration register space\n");
2866 		ret = -ENOMEM;
2867 		goto err_clr_master;
2868 	}
2869 
2870 	return 0;
2871 
2872 err_clr_master:
2873 	pci_clear_master(pdev);
2874 	pci_release_regions(pdev);
2875 err_disable_device:
2876 	pci_disable_device(pdev);
2877 
2878 	return ret;
2879 }
2880 
2881 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
2882 {
2883 	struct pci_dev *pdev = hdev->pdev;
2884 
2885 	pci_iounmap(pdev, hdev->hw.io_base);
2886 	pci_clear_master(pdev);
2887 	pci_release_regions(pdev);
2888 	pci_disable_device(pdev);
2889 }
2890 
2891 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
2892 {
2893 	struct hclgevf_query_res_cmd *req;
2894 	struct hclgevf_desc desc;
2895 	int ret;
2896 
2897 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
2898 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2899 	if (ret) {
2900 		dev_err(&hdev->pdev->dev,
2901 			"query vf resource failed, ret = %d.\n", ret);
2902 		return ret;
2903 	}
2904 
2905 	req = (struct hclgevf_query_res_cmd *)desc.data;
2906 
2907 	if (hnae3_dev_roce_supported(hdev)) {
2908 		hdev->roce_base_msix_offset =
2909 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
2910 				HCLGEVF_MSIX_OFT_ROCEE_M,
2911 				HCLGEVF_MSIX_OFT_ROCEE_S);
2912 		hdev->num_roce_msix =
2913 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2914 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2915 
2916 		/* nic's msix numbers is always equals to the roce's. */
2917 		hdev->num_nic_msix = hdev->num_roce_msix;
2918 
2919 		/* VF should have NIC vectors and Roce vectors, NIC vectors
2920 		 * are queued before Roce vectors. The offset is fixed to 64.
2921 		 */
2922 		hdev->num_msi = hdev->num_roce_msix +
2923 				hdev->roce_base_msix_offset;
2924 	} else {
2925 		hdev->num_msi =
2926 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2927 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2928 
2929 		hdev->num_nic_msix = hdev->num_msi;
2930 	}
2931 
2932 	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
2933 		dev_err(&hdev->pdev->dev,
2934 			"Just %u msi resources, not enough for vf(min:2).\n",
2935 			hdev->num_nic_msix);
2936 		return -EINVAL;
2937 	}
2938 
2939 	return 0;
2940 }
2941 
2942 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
2943 {
2944 #define HCLGEVF_MAX_NON_TSO_BD_NUM			8U
2945 
2946 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2947 
2948 	ae_dev->dev_specs.max_non_tso_bd_num =
2949 					HCLGEVF_MAX_NON_TSO_BD_NUM;
2950 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
2951 	ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
2952 }
2953 
2954 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
2955 				    struct hclgevf_desc *desc)
2956 {
2957 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2958 	struct hclgevf_dev_specs_0_cmd *req0;
2959 
2960 	req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
2961 
2962 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
2963 	ae_dev->dev_specs.rss_ind_tbl_size =
2964 					le16_to_cpu(req0->rss_ind_tbl_size);
2965 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
2966 }
2967 
2968 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
2969 {
2970 	struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
2971 	int ret;
2972 	int i;
2973 
2974 	/* set default specifications as devices lower than version V3 do not
2975 	 * support querying specifications from firmware.
2976 	 */
2977 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
2978 		hclgevf_set_default_dev_specs(hdev);
2979 		return 0;
2980 	}
2981 
2982 	for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
2983 		hclgevf_cmd_setup_basic_desc(&desc[i],
2984 					     HCLGEVF_OPC_QUERY_DEV_SPECS, true);
2985 		desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
2986 	}
2987 	hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
2988 				     true);
2989 
2990 	ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
2991 	if (ret)
2992 		return ret;
2993 
2994 	hclgevf_parse_dev_specs(hdev, desc);
2995 
2996 	return 0;
2997 }
2998 
2999 static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
3000 {
3001 	struct pci_dev *pdev = hdev->pdev;
3002 	int ret = 0;
3003 
3004 	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
3005 	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3006 		hclgevf_misc_irq_uninit(hdev);
3007 		hclgevf_uninit_msi(hdev);
3008 		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3009 	}
3010 
3011 	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3012 		pci_set_master(pdev);
3013 		ret = hclgevf_init_msi(hdev);
3014 		if (ret) {
3015 			dev_err(&pdev->dev,
3016 				"failed(%d) to init MSI/MSI-X\n", ret);
3017 			return ret;
3018 		}
3019 
3020 		ret = hclgevf_misc_irq_init(hdev);
3021 		if (ret) {
3022 			hclgevf_uninit_msi(hdev);
3023 			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
3024 				ret);
3025 			return ret;
3026 		}
3027 
3028 		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3029 	}
3030 
3031 	return ret;
3032 }
3033 
3034 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
3035 {
3036 	struct hclge_vf_to_pf_msg send_msg;
3037 
3038 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
3039 			       HCLGE_MBX_VPORT_LIST_CLEAR);
3040 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3041 }
3042 
3043 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
3044 {
3045 	struct pci_dev *pdev = hdev->pdev;
3046 	int ret;
3047 
3048 	ret = hclgevf_pci_reset(hdev);
3049 	if (ret) {
3050 		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
3051 		return ret;
3052 	}
3053 
3054 	ret = hclgevf_cmd_init(hdev);
3055 	if (ret) {
3056 		dev_err(&pdev->dev, "cmd failed %d\n", ret);
3057 		return ret;
3058 	}
3059 
3060 	ret = hclgevf_rss_init_hw(hdev);
3061 	if (ret) {
3062 		dev_err(&hdev->pdev->dev,
3063 			"failed(%d) to initialize RSS\n", ret);
3064 		return ret;
3065 	}
3066 
3067 	ret = hclgevf_config_gro(hdev, true);
3068 	if (ret)
3069 		return ret;
3070 
3071 	ret = hclgevf_init_vlan_config(hdev);
3072 	if (ret) {
3073 		dev_err(&hdev->pdev->dev,
3074 			"failed(%d) to initialize VLAN config\n", ret);
3075 		return ret;
3076 	}
3077 
3078 	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
3079 
3080 	dev_info(&hdev->pdev->dev, "Reset done\n");
3081 
3082 	return 0;
3083 }
3084 
3085 static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
3086 {
3087 	struct pci_dev *pdev = hdev->pdev;
3088 	int ret;
3089 
3090 	ret = hclgevf_pci_init(hdev);
3091 	if (ret)
3092 		return ret;
3093 
3094 	ret = hclgevf_cmd_queue_init(hdev);
3095 	if (ret)
3096 		goto err_cmd_queue_init;
3097 
3098 	ret = hclgevf_cmd_init(hdev);
3099 	if (ret)
3100 		goto err_cmd_init;
3101 
3102 	/* Get vf resource */
3103 	ret = hclgevf_query_vf_resource(hdev);
3104 	if (ret)
3105 		goto err_cmd_init;
3106 
3107 	ret = hclgevf_query_dev_specs(hdev);
3108 	if (ret) {
3109 		dev_err(&pdev->dev,
3110 			"failed to query dev specifications, ret = %d\n", ret);
3111 		goto err_cmd_init;
3112 	}
3113 
3114 	ret = hclgevf_init_msi(hdev);
3115 	if (ret) {
3116 		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
3117 		goto err_cmd_init;
3118 	}
3119 
3120 	hclgevf_state_init(hdev);
3121 	hdev->reset_level = HNAE3_VF_FUNC_RESET;
3122 	hdev->reset_type = HNAE3_NONE_RESET;
3123 
3124 	ret = hclgevf_misc_irq_init(hdev);
3125 	if (ret)
3126 		goto err_misc_irq_init;
3127 
3128 	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3129 
3130 	ret = hclgevf_configure(hdev);
3131 	if (ret) {
3132 		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
3133 		goto err_config;
3134 	}
3135 
3136 	ret = hclgevf_alloc_tqps(hdev);
3137 	if (ret) {
3138 		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
3139 		goto err_config;
3140 	}
3141 
3142 	ret = hclgevf_set_handle_info(hdev);
3143 	if (ret)
3144 		goto err_config;
3145 
3146 	ret = hclgevf_config_gro(hdev, true);
3147 	if (ret)
3148 		goto err_config;
3149 
3150 	/* Initialize RSS for this VF */
3151 	hclgevf_rss_init_cfg(hdev);
3152 	ret = hclgevf_rss_init_hw(hdev);
3153 	if (ret) {
3154 		dev_err(&hdev->pdev->dev,
3155 			"failed(%d) to initialize RSS\n", ret);
3156 		goto err_config;
3157 	}
3158 
3159 	/* ensure vf tbl list as empty before init*/
3160 	ret = hclgevf_clear_vport_list(hdev);
3161 	if (ret) {
3162 		dev_err(&pdev->dev,
3163 			"failed to clear tbl list configuration, ret = %d.\n",
3164 			ret);
3165 		goto err_config;
3166 	}
3167 
3168 	ret = hclgevf_init_vlan_config(hdev);
3169 	if (ret) {
3170 		dev_err(&hdev->pdev->dev,
3171 			"failed(%d) to initialize VLAN config\n", ret);
3172 		goto err_config;
3173 	}
3174 
3175 	hdev->last_reset_time = jiffies;
3176 	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
3177 		 HCLGEVF_DRIVER_NAME);
3178 
3179 	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
3180 
3181 	return 0;
3182 
3183 err_config:
3184 	hclgevf_misc_irq_uninit(hdev);
3185 err_misc_irq_init:
3186 	hclgevf_state_uninit(hdev);
3187 	hclgevf_uninit_msi(hdev);
3188 err_cmd_init:
3189 	hclgevf_cmd_uninit(hdev);
3190 err_cmd_queue_init:
3191 	hclgevf_pci_uninit(hdev);
3192 	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3193 	return ret;
3194 }
3195 
3196 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
3197 {
3198 	struct hclge_vf_to_pf_msg send_msg;
3199 
3200 	hclgevf_state_uninit(hdev);
3201 
3202 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
3203 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3204 
3205 	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3206 		hclgevf_misc_irq_uninit(hdev);
3207 		hclgevf_uninit_msi(hdev);
3208 	}
3209 
3210 	hclgevf_pci_uninit(hdev);
3211 	hclgevf_cmd_uninit(hdev);
3212 	hclgevf_uninit_mac_list(hdev);
3213 }
3214 
3215 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
3216 {
3217 	struct pci_dev *pdev = ae_dev->pdev;
3218 	int ret;
3219 
3220 	ret = hclgevf_alloc_hdev(ae_dev);
3221 	if (ret) {
3222 		dev_err(&pdev->dev, "hclge device allocation failed\n");
3223 		return ret;
3224 	}
3225 
3226 	ret = hclgevf_init_hdev(ae_dev->priv);
3227 	if (ret) {
3228 		dev_err(&pdev->dev, "hclge device initialization failed\n");
3229 		return ret;
3230 	}
3231 
3232 	return 0;
3233 }
3234 
3235 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
3236 {
3237 	struct hclgevf_dev *hdev = ae_dev->priv;
3238 
3239 	hclgevf_uninit_hdev(hdev);
3240 	ae_dev->priv = NULL;
3241 }
3242 
3243 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
3244 {
3245 	struct hnae3_handle *nic = &hdev->nic;
3246 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
3247 
3248 	return min_t(u32, hdev->rss_size_max,
3249 		     hdev->num_tqps / kinfo->num_tc);
3250 }
3251 
3252 /**
3253  * hclgevf_get_channels - Get the current channels enabled and max supported.
3254  * @handle: hardware information for network interface
3255  * @ch: ethtool channels structure
3256  *
3257  * We don't support separate tx and rx queues as channels. The other count
3258  * represents how many queues are being used for control. max_combined counts
3259  * how many queue pairs we can support. They may not be mapped 1 to 1 with
3260  * q_vectors since we support a lot more queue pairs than q_vectors.
3261  **/
3262 static void hclgevf_get_channels(struct hnae3_handle *handle,
3263 				 struct ethtool_channels *ch)
3264 {
3265 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3266 
3267 	ch->max_combined = hclgevf_get_max_channels(hdev);
3268 	ch->other_count = 0;
3269 	ch->max_other = 0;
3270 	ch->combined_count = handle->kinfo.rss_size;
3271 }
3272 
3273 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
3274 					  u16 *alloc_tqps, u16 *max_rss_size)
3275 {
3276 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3277 
3278 	*alloc_tqps = hdev->num_tqps;
3279 	*max_rss_size = hdev->rss_size_max;
3280 }
3281 
3282 static void hclgevf_update_rss_size(struct hnae3_handle *handle,
3283 				    u32 new_tqps_num)
3284 {
3285 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3286 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3287 	u16 max_rss_size;
3288 
3289 	kinfo->req_rss_size = new_tqps_num;
3290 
3291 	max_rss_size = min_t(u16, hdev->rss_size_max,
3292 			     hdev->num_tqps / kinfo->num_tc);
3293 
3294 	/* Use the user's configuration when it is not larger than
3295 	 * max_rss_size, otherwise, use the maximum specification value.
3296 	 */
3297 	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
3298 	    kinfo->req_rss_size <= max_rss_size)
3299 		kinfo->rss_size = kinfo->req_rss_size;
3300 	else if (kinfo->rss_size > max_rss_size ||
3301 		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
3302 		kinfo->rss_size = max_rss_size;
3303 
3304 	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
3305 }
3306 
3307 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
3308 				bool rxfh_configured)
3309 {
3310 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3311 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3312 	u16 cur_rss_size = kinfo->rss_size;
3313 	u16 cur_tqps = kinfo->num_tqps;
3314 	u32 *rss_indir;
3315 	unsigned int i;
3316 	int ret;
3317 
3318 	hclgevf_update_rss_size(handle, new_tqps_num);
3319 
3320 	ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
3321 	if (ret)
3322 		return ret;
3323 
3324 	/* RSS indirection table has been configuared by user */
3325 	if (rxfh_configured)
3326 		goto out;
3327 
3328 	/* Reinitializes the rss indirect table according to the new RSS size */
3329 	rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
3330 	if (!rss_indir)
3331 		return -ENOMEM;
3332 
3333 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
3334 		rss_indir[i] = i % kinfo->rss_size;
3335 
3336 	hdev->rss_cfg.rss_size = kinfo->rss_size;
3337 
3338 	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
3339 	if (ret)
3340 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
3341 			ret);
3342 
3343 	kfree(rss_indir);
3344 
3345 out:
3346 	if (!ret)
3347 		dev_info(&hdev->pdev->dev,
3348 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
3349 			 cur_rss_size, kinfo->rss_size,
3350 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
3351 
3352 	return ret;
3353 }
3354 
3355 static int hclgevf_get_status(struct hnae3_handle *handle)
3356 {
3357 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3358 
3359 	return hdev->hw.mac.link;
3360 }
3361 
3362 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
3363 					    u8 *auto_neg, u32 *speed,
3364 					    u8 *duplex)
3365 {
3366 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3367 
3368 	if (speed)
3369 		*speed = hdev->hw.mac.speed;
3370 	if (duplex)
3371 		*duplex = hdev->hw.mac.duplex;
3372 	if (auto_neg)
3373 		*auto_neg = AUTONEG_DISABLE;
3374 }
3375 
3376 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
3377 				 u8 duplex)
3378 {
3379 	hdev->hw.mac.speed = speed;
3380 	hdev->hw.mac.duplex = duplex;
3381 }
3382 
3383 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
3384 {
3385 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3386 
3387 	return hclgevf_config_gro(hdev, enable);
3388 }
3389 
3390 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
3391 				   u8 *module_type)
3392 {
3393 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3394 
3395 	if (media_type)
3396 		*media_type = hdev->hw.mac.media_type;
3397 
3398 	if (module_type)
3399 		*module_type = hdev->hw.mac.module_type;
3400 }
3401 
3402 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
3403 {
3404 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3405 
3406 	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
3407 }
3408 
3409 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
3410 {
3411 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3412 
3413 	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
3414 }
3415 
3416 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
3417 {
3418 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3419 
3420 	return hdev->rst_stats.hw_rst_done_cnt;
3421 }
3422 
3423 static void hclgevf_get_link_mode(struct hnae3_handle *handle,
3424 				  unsigned long *supported,
3425 				  unsigned long *advertising)
3426 {
3427 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3428 
3429 	*supported = hdev->hw.mac.supported;
3430 	*advertising = hdev->hw.mac.advertising;
3431 }
3432 
3433 #define MAX_SEPARATE_NUM	4
3434 #define SEPARATOR_VALUE		0xFFFFFFFF
3435 #define REG_NUM_PER_LINE	4
3436 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
3437 
3438 static int hclgevf_get_regs_len(struct hnae3_handle *handle)
3439 {
3440 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
3441 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3442 
3443 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
3444 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
3445 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
3446 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
3447 
3448 	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
3449 		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
3450 }
3451 
3452 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
3453 			     void *data)
3454 {
3455 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3456 	int i, j, reg_um, separator_num;
3457 	u32 *reg = data;
3458 
3459 	*version = hdev->fw_version;
3460 
3461 	/* fetching per-VF registers values from VF PCIe register space */
3462 	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
3463 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3464 	for (i = 0; i < reg_um; i++)
3465 		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
3466 	for (i = 0; i < separator_num; i++)
3467 		*reg++ = SEPARATOR_VALUE;
3468 
3469 	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
3470 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3471 	for (i = 0; i < reg_um; i++)
3472 		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
3473 	for (i = 0; i < separator_num; i++)
3474 		*reg++ = SEPARATOR_VALUE;
3475 
3476 	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
3477 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3478 	for (j = 0; j < hdev->num_tqps; j++) {
3479 		for (i = 0; i < reg_um; i++)
3480 			*reg++ = hclgevf_read_dev(&hdev->hw,
3481 						  ring_reg_addr_list[i] +
3482 						  0x200 * j);
3483 		for (i = 0; i < separator_num; i++)
3484 			*reg++ = SEPARATOR_VALUE;
3485 	}
3486 
3487 	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
3488 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
3489 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
3490 		for (i = 0; i < reg_um; i++)
3491 			*reg++ = hclgevf_read_dev(&hdev->hw,
3492 						  tqp_intr_reg_addr_list[i] +
3493 						  4 * j);
3494 		for (i = 0; i < separator_num; i++)
3495 			*reg++ = SEPARATOR_VALUE;
3496 	}
3497 }
3498 
3499 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
3500 					u8 *port_base_vlan_info, u8 data_size)
3501 {
3502 	struct hnae3_handle *nic = &hdev->nic;
3503 	struct hclge_vf_to_pf_msg send_msg;
3504 	int ret;
3505 
3506 	rtnl_lock();
3507 
3508 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
3509 	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
3510 		dev_warn(&hdev->pdev->dev,
3511 			 "is resetting when updating port based vlan info\n");
3512 		rtnl_unlock();
3513 		return;
3514 	}
3515 
3516 	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
3517 	if (ret) {
3518 		rtnl_unlock();
3519 		return;
3520 	}
3521 
3522 	/* send msg to PF and wait update port based vlan info */
3523 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
3524 			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
3525 	memcpy(send_msg.data, port_base_vlan_info, data_size);
3526 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3527 	if (!ret) {
3528 		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
3529 			nic->port_base_vlan_state = state;
3530 		else
3531 			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
3532 	}
3533 
3534 	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
3535 	rtnl_unlock();
3536 }
3537 
3538 static const struct hnae3_ae_ops hclgevf_ops = {
3539 	.init_ae_dev = hclgevf_init_ae_dev,
3540 	.uninit_ae_dev = hclgevf_uninit_ae_dev,
3541 	.flr_prepare = hclgevf_flr_prepare,
3542 	.flr_done = hclgevf_flr_done,
3543 	.init_client_instance = hclgevf_init_client_instance,
3544 	.uninit_client_instance = hclgevf_uninit_client_instance,
3545 	.start = hclgevf_ae_start,
3546 	.stop = hclgevf_ae_stop,
3547 	.client_start = hclgevf_client_start,
3548 	.client_stop = hclgevf_client_stop,
3549 	.map_ring_to_vector = hclgevf_map_ring_to_vector,
3550 	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
3551 	.get_vector = hclgevf_get_vector,
3552 	.put_vector = hclgevf_put_vector,
3553 	.reset_queue = hclgevf_reset_tqp,
3554 	.get_mac_addr = hclgevf_get_mac_addr,
3555 	.set_mac_addr = hclgevf_set_mac_addr,
3556 	.add_uc_addr = hclgevf_add_uc_addr,
3557 	.rm_uc_addr = hclgevf_rm_uc_addr,
3558 	.add_mc_addr = hclgevf_add_mc_addr,
3559 	.rm_mc_addr = hclgevf_rm_mc_addr,
3560 	.get_stats = hclgevf_get_stats,
3561 	.update_stats = hclgevf_update_stats,
3562 	.get_strings = hclgevf_get_strings,
3563 	.get_sset_count = hclgevf_get_sset_count,
3564 	.get_rss_key_size = hclgevf_get_rss_key_size,
3565 	.get_rss_indir_size = hclgevf_get_rss_indir_size,
3566 	.get_rss = hclgevf_get_rss,
3567 	.set_rss = hclgevf_set_rss,
3568 	.get_rss_tuple = hclgevf_get_rss_tuple,
3569 	.set_rss_tuple = hclgevf_set_rss_tuple,
3570 	.get_tc_size = hclgevf_get_tc_size,
3571 	.get_fw_version = hclgevf_get_fw_version,
3572 	.set_vlan_filter = hclgevf_set_vlan_filter,
3573 	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
3574 	.reset_event = hclgevf_reset_event,
3575 	.set_default_reset_request = hclgevf_set_def_reset_request,
3576 	.set_channels = hclgevf_set_channels,
3577 	.get_channels = hclgevf_get_channels,
3578 	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
3579 	.get_regs_len = hclgevf_get_regs_len,
3580 	.get_regs = hclgevf_get_regs,
3581 	.get_status = hclgevf_get_status,
3582 	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
3583 	.get_media_type = hclgevf_get_media_type,
3584 	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
3585 	.ae_dev_resetting = hclgevf_ae_dev_resetting,
3586 	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
3587 	.set_gro_en = hclgevf_gro_en,
3588 	.set_mtu = hclgevf_set_mtu,
3589 	.get_global_queue_id = hclgevf_get_qid_global,
3590 	.set_timer_task = hclgevf_set_timer_task,
3591 	.get_link_mode = hclgevf_get_link_mode,
3592 	.set_promisc_mode = hclgevf_set_promisc_mode,
3593 	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
3594 };
3595 
3596 static struct hnae3_ae_algo ae_algovf = {
3597 	.ops = &hclgevf_ops,
3598 	.pdev_id_table = ae_algovf_pci_tbl,
3599 };
3600 
3601 static int hclgevf_init(void)
3602 {
3603 	pr_info("%s is initializing\n", HCLGEVF_NAME);
3604 
3605 	hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
3606 	if (!hclgevf_wq) {
3607 		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
3608 		return -ENOMEM;
3609 	}
3610 
3611 	hnae3_register_ae_algo(&ae_algovf);
3612 
3613 	return 0;
3614 }
3615 
3616 static void hclgevf_exit(void)
3617 {
3618 	hnae3_unregister_ae_algo(&ae_algovf);
3619 	destroy_workqueue(hclgevf_wq);
3620 }
3621 module_init(hclgevf_init);
3622 module_exit(hclgevf_exit);
3623 
3624 MODULE_LICENSE("GPL");
3625 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3626 MODULE_DESCRIPTION("HCLGEVF Driver");
3627 MODULE_VERSION(HCLGEVF_MOD_VERSION);
3628