1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/etherdevice.h>
5 #include <linux/iopoll.h>
6 #include <net/rtnetlink.h>
7 #include "hclgevf_cmd.h"
8 #include "hclgevf_main.h"
9 #include "hclgevf_regs.h"
10 #include "hclge_mbx.h"
11 #include "hnae3.h"
12 #include "hclgevf_devlink.h"
13 #include "hclge_comm_rss.h"
14 
15 #define HCLGEVF_NAME	"hclgevf"
16 
17 #define HCLGEVF_RESET_MAX_FAIL_CNT	5
18 
19 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
20 static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
21 				  unsigned long delay);
22 
23 static struct hnae3_ae_algo ae_algovf;
24 
25 static struct workqueue_struct *hclgevf_wq;
26 
27 static const struct pci_device_id ae_algovf_pci_tbl[] = {
28 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
29 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
30 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
31 	/* required last entry */
32 	{0, }
33 };
34 
35 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
36 
37 /* hclgevf_cmd_send - send command to command queue
38  * @hw: pointer to the hw struct
39  * @desc: prefilled descriptor for describing the command
40  * @num : the number of descriptors to be sent
41  *
42  * This is the main send command for command queue, it
43  * sends the queue, cleans the queue, etc
44  */
45 int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
46 {
47 	return hclge_comm_cmd_send(&hw->hw, desc, num);
48 }
49 
50 void hclgevf_arq_init(struct hclgevf_dev *hdev)
51 {
52 	struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;
53 
54 	spin_lock(&cmdq->crq.lock);
55 	/* initialize the pointers of async rx queue of mailbox */
56 	hdev->arq.hdev = hdev;
57 	hdev->arq.head = 0;
58 	hdev->arq.tail = 0;
59 	atomic_set(&hdev->arq.count, 0);
60 	spin_unlock(&cmdq->crq.lock);
61 }
62 
63 struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
64 {
65 	if (!handle->client)
66 		return container_of(handle, struct hclgevf_dev, nic);
67 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
68 		return container_of(handle, struct hclgevf_dev, roce);
69 	else
70 		return container_of(handle, struct hclgevf_dev, nic);
71 }
72 
73 static void hclgevf_update_stats(struct hnae3_handle *handle)
74 {
75 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
76 	int status;
77 
78 	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
79 	if (status)
80 		dev_err(&hdev->pdev->dev,
81 			"VF update of TQPS stats fail, status = %d.\n",
82 			status);
83 }
84 
85 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
86 {
87 	if (strset == ETH_SS_TEST)
88 		return -EOPNOTSUPP;
89 	else if (strset == ETH_SS_STATS)
90 		return hclge_comm_tqps_get_sset_count(handle);
91 
92 	return 0;
93 }
94 
95 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
96 				u8 *data)
97 {
98 	u8 *p = (char *)data;
99 
100 	if (strset == ETH_SS_STATS)
101 		p = hclge_comm_tqps_get_strings(handle, p);
102 }
103 
104 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
105 {
106 	hclge_comm_tqps_get_stats(handle, data);
107 }
108 
109 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
110 				   u8 subcode)
111 {
112 	if (msg) {
113 		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
114 		msg->code = code;
115 		msg->subcode = subcode;
116 	}
117 }
118 
119 static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
120 {
121 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
122 	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
123 	struct hclge_basic_info *basic_info;
124 	struct hclge_vf_to_pf_msg send_msg;
125 	unsigned long caps;
126 	int status;
127 
128 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
129 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
130 				      sizeof(resp_msg));
131 	if (status) {
132 		dev_err(&hdev->pdev->dev,
133 			"failed to get basic info from pf, ret = %d", status);
134 		return status;
135 	}
136 
137 	basic_info = (struct hclge_basic_info *)resp_msg;
138 
139 	hdev->hw_tc_map = basic_info->hw_tc_map;
140 	hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
141 	caps = le32_to_cpu(basic_info->pf_caps);
142 	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
143 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
144 
145 	return 0;
146 }
147 
148 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
149 {
150 	struct hnae3_handle *nic = &hdev->nic;
151 	struct hclge_vf_to_pf_msg send_msg;
152 	u8 resp_msg;
153 	int ret;
154 
155 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
156 			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
157 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
158 				   sizeof(u8));
159 	if (ret) {
160 		dev_err(&hdev->pdev->dev,
161 			"VF request to get port based vlan state failed %d",
162 			ret);
163 		return ret;
164 	}
165 
166 	nic->port_base_vlan_state = resp_msg;
167 
168 	return 0;
169 }
170 
171 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
172 {
173 #define HCLGEVF_TQPS_RSS_INFO_LEN	6
174 
175 	struct hclge_mbx_vf_queue_info *queue_info;
176 	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
177 	struct hclge_vf_to_pf_msg send_msg;
178 	int status;
179 
180 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
181 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
182 				      HCLGEVF_TQPS_RSS_INFO_LEN);
183 	if (status) {
184 		dev_err(&hdev->pdev->dev,
185 			"VF request to get tqp info from PF failed %d",
186 			status);
187 		return status;
188 	}
189 
190 	queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
191 	hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
192 	hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
193 	hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);
194 
195 	return 0;
196 }
197 
198 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
199 {
200 #define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
201 
202 	struct hclge_mbx_vf_queue_depth *queue_depth;
203 	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
204 	struct hclge_vf_to_pf_msg send_msg;
205 	int ret;
206 
207 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
208 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
209 				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
210 	if (ret) {
211 		dev_err(&hdev->pdev->dev,
212 			"VF request to get tqp depth info from PF failed %d",
213 			ret);
214 		return ret;
215 	}
216 
217 	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
218 	hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
219 	hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);
220 
221 	return 0;
222 }
223 
224 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
225 {
226 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
227 	struct hclge_vf_to_pf_msg send_msg;
228 	u16 qid_in_pf = 0;
229 	u8 resp_data[2];
230 	int ret;
231 
232 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
233 	*(__le16 *)send_msg.data = cpu_to_le16(queue_id);
234 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
235 				   sizeof(resp_data));
236 	if (!ret)
237 		qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);
238 
239 	return qid_in_pf;
240 }
241 
242 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
243 {
244 	struct hclge_vf_to_pf_msg send_msg;
245 	u8 resp_msg[2];
246 	int ret;
247 
248 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
249 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
250 				   sizeof(resp_msg));
251 	if (ret) {
252 		dev_err(&hdev->pdev->dev,
253 			"VF request to get the pf port media type failed %d",
254 			ret);
255 		return ret;
256 	}
257 
258 	hdev->hw.mac.media_type = resp_msg[0];
259 	hdev->hw.mac.module_type = resp_msg[1];
260 
261 	return 0;
262 }
263 
264 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
265 {
266 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
267 	struct hclge_comm_tqp *tqp;
268 	int i;
269 
270 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
271 				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
272 	if (!hdev->htqp)
273 		return -ENOMEM;
274 
275 	tqp = hdev->htqp;
276 
277 	for (i = 0; i < hdev->num_tqps; i++) {
278 		tqp->dev = &hdev->pdev->dev;
279 		tqp->index = i;
280 
281 		tqp->q.ae_algo = &ae_algovf;
282 		tqp->q.buf_size = hdev->rx_buf_len;
283 		tqp->q.tx_desc_num = hdev->num_tx_desc;
284 		tqp->q.rx_desc_num = hdev->num_rx_desc;
285 
286 		/* need an extended offset to configure queues >=
287 		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
288 		 */
289 		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
290 			tqp->q.io_base = hdev->hw.hw.io_base +
291 					 HCLGEVF_TQP_REG_OFFSET +
292 					 i * HCLGEVF_TQP_REG_SIZE;
293 		else
294 			tqp->q.io_base = hdev->hw.hw.io_base +
295 					 HCLGEVF_TQP_REG_OFFSET +
296 					 HCLGEVF_TQP_EXT_REG_OFFSET +
297 					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
298 					 HCLGEVF_TQP_REG_SIZE;
299 
300 		/* when device supports tx push and has device memory,
301 		 * the queue can execute push mode or doorbell mode on
302 		 * device memory.
303 		 */
304 		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
305 			tqp->q.mem_base = hdev->hw.hw.mem_base +
306 					  HCLGEVF_TQP_MEM_OFFSET(hdev, i);
307 
308 		tqp++;
309 	}
310 
311 	return 0;
312 }
313 
314 static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
315 {
316 	struct hnae3_handle *nic = &hdev->nic;
317 	struct hnae3_knic_private_info *kinfo;
318 	u16 new_tqps = hdev->num_tqps;
319 	unsigned int i;
320 	u8 num_tc = 0;
321 
322 	kinfo = &nic->kinfo;
323 	kinfo->num_tx_desc = hdev->num_tx_desc;
324 	kinfo->num_rx_desc = hdev->num_rx_desc;
325 	kinfo->rx_buf_len = hdev->rx_buf_len;
326 	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
327 		if (hdev->hw_tc_map & BIT(i))
328 			num_tc++;
329 
330 	num_tc = num_tc ? num_tc : 1;
331 	kinfo->tc_info.num_tc = num_tc;
332 	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
333 	new_tqps = kinfo->rss_size * num_tc;
334 	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
335 
336 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
337 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
338 	if (!kinfo->tqp)
339 		return -ENOMEM;
340 
341 	for (i = 0; i < kinfo->num_tqps; i++) {
342 		hdev->htqp[i].q.handle = &hdev->nic;
343 		hdev->htqp[i].q.tqp_index = i;
344 		kinfo->tqp[i] = &hdev->htqp[i].q;
345 	}
346 
347 	/* after init the max rss_size and tqps, adjust the default tqp numbers
348 	 * and rss size with the actual vector numbers
349 	 */
350 	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
351 	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
352 				kinfo->rss_size);
353 
354 	return 0;
355 }
356 
357 static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
358 {
359 	struct hclge_vf_to_pf_msg send_msg;
360 	int status;
361 
362 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
363 	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
364 	if (status)
365 		dev_err(&hdev->pdev->dev,
366 			"VF failed to fetch link status(%d) from PF", status);
367 }
368 
369 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
370 {
371 	struct hnae3_handle *rhandle = &hdev->roce;
372 	struct hnae3_handle *handle = &hdev->nic;
373 	struct hnae3_client *rclient;
374 	struct hnae3_client *client;
375 
376 	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
377 		return;
378 
379 	client = handle->client;
380 	rclient = hdev->roce_client;
381 
382 	link_state =
383 		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
384 	if (link_state != hdev->hw.mac.link) {
385 		hdev->hw.mac.link = link_state;
386 		client->ops->link_status_change(handle, !!link_state);
387 		if (rclient && rclient->ops->link_status_change)
388 			rclient->ops->link_status_change(rhandle, !!link_state);
389 	}
390 
391 	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
392 }
393 
394 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
395 {
396 #define HCLGEVF_ADVERTISING	0
397 #define HCLGEVF_SUPPORTED	1
398 
399 	struct hclge_vf_to_pf_msg send_msg;
400 
401 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
402 	send_msg.data[0] = HCLGEVF_ADVERTISING;
403 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
404 	send_msg.data[0] = HCLGEVF_SUPPORTED;
405 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
406 }
407 
408 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
409 {
410 	struct hnae3_handle *nic = &hdev->nic;
411 	int ret;
412 
413 	nic->ae_algo = &ae_algovf;
414 	nic->pdev = hdev->pdev;
415 	nic->numa_node_mask = hdev->numa_node_mask;
416 	nic->flags |= HNAE3_SUPPORT_VF;
417 	nic->kinfo.io_base = hdev->hw.hw.io_base;
418 
419 	ret = hclgevf_knic_setup(hdev);
420 	if (ret)
421 		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
422 			ret);
423 	return ret;
424 }
425 
426 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
427 {
428 	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
429 		dev_warn(&hdev->pdev->dev,
430 			 "vector(vector_id %d) has been freed.\n", vector_id);
431 		return;
432 	}
433 
434 	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
435 	hdev->num_msi_left += 1;
436 	hdev->num_msi_used -= 1;
437 }
438 
439 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
440 			      struct hnae3_vector_info *vector_info)
441 {
442 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
443 	struct hnae3_vector_info *vector = vector_info;
444 	int alloc = 0;
445 	int i, j;
446 
447 	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
448 	vector_num = min(hdev->num_msi_left, vector_num);
449 
450 	for (j = 0; j < vector_num; j++) {
451 		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
452 			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
453 				vector->vector = pci_irq_vector(hdev->pdev, i);
454 				vector->io_addr = hdev->hw.hw.io_base +
455 					HCLGEVF_VECTOR_REG_BASE +
456 					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
457 				hdev->vector_status[i] = 0;
458 				hdev->vector_irq[i] = vector->vector;
459 
460 				vector++;
461 				alloc++;
462 
463 				break;
464 			}
465 		}
466 	}
467 	hdev->num_msi_left -= alloc;
468 	hdev->num_msi_used += alloc;
469 
470 	return alloc;
471 }
472 
473 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
474 {
475 	int i;
476 
477 	for (i = 0; i < hdev->num_msi; i++)
478 		if (vector == hdev->vector_irq[i])
479 			return i;
480 
481 	return -EINVAL;
482 }
483 
484 /* for revision 0x20, vf shared the same rss config with pf */
485 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
486 {
487 #define HCLGEVF_RSS_MBX_RESP_LEN	8
488 	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
489 	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
490 	struct hclge_vf_to_pf_msg send_msg;
491 	u16 msg_num, hash_key_index;
492 	u8 index;
493 	int ret;
494 
495 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
496 	msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
497 			HCLGEVF_RSS_MBX_RESP_LEN;
498 	for (index = 0; index < msg_num; index++) {
499 		send_msg.data[0] = index;
500 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
501 					   HCLGEVF_RSS_MBX_RESP_LEN);
502 		if (ret) {
503 			dev_err(&hdev->pdev->dev,
504 				"VF get rss hash key from PF failed, ret=%d",
505 				ret);
506 			return ret;
507 		}
508 
509 		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
510 		if (index == msg_num - 1)
511 			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
512 			       &resp_msg[0],
513 			       HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
514 		else
515 			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
516 			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
517 	}
518 
519 	return 0;
520 }
521 
522 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
523 			   u8 *hfunc)
524 {
525 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
526 	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
527 	int ret;
528 
529 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
530 		hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
531 	} else {
532 		if (hfunc)
533 			*hfunc = ETH_RSS_HASH_TOP;
534 		if (key) {
535 			ret = hclgevf_get_rss_hash_key(hdev);
536 			if (ret)
537 				return ret;
538 			memcpy(key, rss_cfg->rss_hash_key,
539 			       HCLGE_COMM_RSS_KEY_SIZE);
540 		}
541 	}
542 
543 	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
544 				     hdev->ae_dev->dev_specs.rss_ind_tbl_size);
545 
546 	return 0;
547 }
548 
549 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
550 			   const u8 *key, const u8 hfunc)
551 {
552 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
553 	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
554 	int ret, i;
555 
556 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
557 		ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
558 						  hfunc);
559 		if (ret)
560 			return ret;
561 	}
562 
563 	/* update the shadow RSS table with user specified qids */
564 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
565 		rss_cfg->rss_indirection_tbl[i] = indir[i];
566 
567 	/* update the hardware */
568 	return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
569 					      rss_cfg->rss_indirection_tbl);
570 }
571 
572 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
573 				 struct ethtool_rxnfc *nfc)
574 {
575 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
576 	int ret;
577 
578 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
579 		return -EOPNOTSUPP;
580 
581 	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
582 				       &hdev->rss_cfg, nfc);
583 	if (ret)
584 		dev_err(&hdev->pdev->dev,
585 		"failed to set rss tuple, ret = %d.\n", ret);
586 
587 	return ret;
588 }
589 
590 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
591 				 struct ethtool_rxnfc *nfc)
592 {
593 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
594 	u8 tuple_sets;
595 	int ret;
596 
597 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
598 		return -EOPNOTSUPP;
599 
600 	nfc->data = 0;
601 
602 	ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
603 				       &tuple_sets);
604 	if (ret || !tuple_sets)
605 		return ret;
606 
607 	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
608 
609 	return 0;
610 }
611 
612 static int hclgevf_get_tc_size(struct hnae3_handle *handle)
613 {
614 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
615 	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
616 
617 	return rss_cfg->rss_size;
618 }
619 
620 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
621 				       int vector_id,
622 				       struct hnae3_ring_chain_node *ring_chain)
623 {
624 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
625 	struct hclge_vf_to_pf_msg send_msg;
626 	struct hnae3_ring_chain_node *node;
627 	int status;
628 	int i = 0;
629 
630 	memset(&send_msg, 0, sizeof(send_msg));
631 	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
632 		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
633 	send_msg.vector_id = vector_id;
634 
635 	for (node = ring_chain; node; node = node->next) {
636 		send_msg.param[i].ring_type =
637 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
638 
639 		send_msg.param[i].tqp_index = node->tqp_index;
640 		send_msg.param[i].int_gl_index =
641 					hnae3_get_field(node->int_gl_idx,
642 							HNAE3_RING_GL_IDX_M,
643 							HNAE3_RING_GL_IDX_S);
644 
645 		i++;
646 		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
647 			send_msg.ring_num = i;
648 
649 			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
650 						      NULL, 0);
651 			if (status) {
652 				dev_err(&hdev->pdev->dev,
653 					"Map TQP fail, status is %d.\n",
654 					status);
655 				return status;
656 			}
657 			i = 0;
658 		}
659 	}
660 
661 	return 0;
662 }
663 
664 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
665 				      struct hnae3_ring_chain_node *ring_chain)
666 {
667 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
668 	int vector_id;
669 
670 	vector_id = hclgevf_get_vector_index(hdev, vector);
671 	if (vector_id < 0) {
672 		dev_err(&handle->pdev->dev,
673 			"Get vector index fail. ret =%d\n", vector_id);
674 		return vector_id;
675 	}
676 
677 	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
678 }
679 
680 static int hclgevf_unmap_ring_from_vector(
681 				struct hnae3_handle *handle,
682 				int vector,
683 				struct hnae3_ring_chain_node *ring_chain)
684 {
685 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
686 	int ret, vector_id;
687 
688 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
689 		return 0;
690 
691 	vector_id = hclgevf_get_vector_index(hdev, vector);
692 	if (vector_id < 0) {
693 		dev_err(&handle->pdev->dev,
694 			"Get vector index fail. ret =%d\n", vector_id);
695 		return vector_id;
696 	}
697 
698 	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
699 	if (ret)
700 		dev_err(&handle->pdev->dev,
701 			"Unmap ring from vector fail. vector=%d, ret =%d\n",
702 			vector_id,
703 			ret);
704 
705 	return ret;
706 }
707 
708 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
709 {
710 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
711 	int vector_id;
712 
713 	vector_id = hclgevf_get_vector_index(hdev, vector);
714 	if (vector_id < 0) {
715 		dev_err(&handle->pdev->dev,
716 			"hclgevf_put_vector get vector index fail. ret =%d\n",
717 			vector_id);
718 		return vector_id;
719 	}
720 
721 	hclgevf_free_vector(hdev, vector_id);
722 
723 	return 0;
724 }
725 
726 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
727 					bool en_uc_pmc, bool en_mc_pmc,
728 					bool en_bc_pmc)
729 {
730 	struct hnae3_handle *handle = &hdev->nic;
731 	struct hclge_vf_to_pf_msg send_msg;
732 	int ret;
733 
734 	memset(&send_msg, 0, sizeof(send_msg));
735 	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
736 	send_msg.en_bc = en_bc_pmc ? 1 : 0;
737 	send_msg.en_uc = en_uc_pmc ? 1 : 0;
738 	send_msg.en_mc = en_mc_pmc ? 1 : 0;
739 	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
740 					     &handle->priv_flags) ? 1 : 0;
741 
742 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
743 	if (ret)
744 		dev_err(&hdev->pdev->dev,
745 			"Set promisc mode fail, status is %d.\n", ret);
746 
747 	return ret;
748 }
749 
750 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
751 				    bool en_mc_pmc)
752 {
753 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
754 	bool en_bc_pmc;
755 
756 	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
757 
758 	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
759 					    en_bc_pmc);
760 }
761 
762 static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
763 {
764 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
765 
766 	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
767 	hclgevf_task_schedule(hdev, 0);
768 }
769 
770 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
771 {
772 	struct hnae3_handle *handle = &hdev->nic;
773 	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
774 	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
775 	int ret;
776 
777 	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
778 		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
779 		if (!ret)
780 			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
781 	}
782 }
783 
784 static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
785 				       u16 stream_id, bool enable)
786 {
787 	struct hclgevf_cfg_com_tqp_queue_cmd *req;
788 	struct hclge_desc desc;
789 
790 	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
791 
792 	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
793 	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
794 	req->stream_id = cpu_to_le16(stream_id);
795 	if (enable)
796 		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
797 
798 	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
799 }
800 
801 static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
802 {
803 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
804 	int ret;
805 	u16 i;
806 
807 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
808 		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
809 		if (ret)
810 			return ret;
811 	}
812 
813 	return 0;
814 }
815 
816 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
817 {
818 	struct hclge_vf_to_pf_msg send_msg;
819 	u8 host_mac[ETH_ALEN];
820 	int status;
821 
822 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
823 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
824 				      ETH_ALEN);
825 	if (status) {
826 		dev_err(&hdev->pdev->dev,
827 			"fail to get VF MAC from host %d", status);
828 		return status;
829 	}
830 
831 	ether_addr_copy(p, host_mac);
832 
833 	return 0;
834 }
835 
836 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
837 {
838 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
839 	u8 host_mac_addr[ETH_ALEN];
840 
841 	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
842 		return;
843 
844 	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
845 	if (hdev->has_pf_mac)
846 		ether_addr_copy(p, host_mac_addr);
847 	else
848 		ether_addr_copy(p, hdev->hw.mac.mac_addr);
849 }
850 
851 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
852 				bool is_first)
853 {
854 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
855 	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
856 	struct hclge_vf_to_pf_msg send_msg;
857 	u8 *new_mac_addr = (u8 *)p;
858 	int status;
859 
860 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
861 	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
862 	ether_addr_copy(send_msg.data, new_mac_addr);
863 	if (is_first && !hdev->has_pf_mac)
864 		eth_zero_addr(&send_msg.data[ETH_ALEN]);
865 	else
866 		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
867 	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
868 	if (!status)
869 		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
870 
871 	return status;
872 }
873 
874 static struct hclgevf_mac_addr_node *
875 hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
876 {
877 	struct hclgevf_mac_addr_node *mac_node, *tmp;
878 
879 	list_for_each_entry_safe(mac_node, tmp, list, node)
880 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
881 			return mac_node;
882 
883 	return NULL;
884 }
885 
886 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
887 				    enum HCLGEVF_MAC_NODE_STATE state)
888 {
889 	switch (state) {
890 	/* from set_rx_mode or tmp_add_list */
891 	case HCLGEVF_MAC_TO_ADD:
892 		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
893 			mac_node->state = HCLGEVF_MAC_ACTIVE;
894 		break;
895 	/* only from set_rx_mode */
896 	case HCLGEVF_MAC_TO_DEL:
897 		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
898 			list_del(&mac_node->node);
899 			kfree(mac_node);
900 		} else {
901 			mac_node->state = HCLGEVF_MAC_TO_DEL;
902 		}
903 		break;
904 	/* only from tmp_add_list, the mac_node->state won't be
905 	 * HCLGEVF_MAC_ACTIVE
906 	 */
907 	case HCLGEVF_MAC_ACTIVE:
908 		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
909 			mac_node->state = HCLGEVF_MAC_ACTIVE;
910 		break;
911 	}
912 }
913 
914 static int hclgevf_update_mac_list(struct hnae3_handle *handle,
915 				   enum HCLGEVF_MAC_NODE_STATE state,
916 				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
917 				   const unsigned char *addr)
918 {
919 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
920 	struct hclgevf_mac_addr_node *mac_node;
921 	struct list_head *list;
922 
923 	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
924 	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
925 
926 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
927 
928 	/* if the mac addr is already in the mac list, no need to add a new
929 	 * one into it, just check the mac addr state, convert it to a new
930 	 * state, or just remove it, or do nothing.
931 	 */
932 	mac_node = hclgevf_find_mac_node(list, addr);
933 	if (mac_node) {
934 		hclgevf_update_mac_node(mac_node, state);
935 		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
936 		return 0;
937 	}
938 	/* if this address is never added, unnecessary to delete */
939 	if (state == HCLGEVF_MAC_TO_DEL) {
940 		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
941 		return -ENOENT;
942 	}
943 
944 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
945 	if (!mac_node) {
946 		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
947 		return -ENOMEM;
948 	}
949 
950 	mac_node->state = state;
951 	ether_addr_copy(mac_node->mac_addr, addr);
952 	list_add_tail(&mac_node->node, list);
953 
954 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
955 	return 0;
956 }
957 
958 static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
959 			       const unsigned char *addr)
960 {
961 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
962 				       HCLGEVF_MAC_ADDR_UC, addr);
963 }
964 
965 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
966 			      const unsigned char *addr)
967 {
968 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
969 				       HCLGEVF_MAC_ADDR_UC, addr);
970 }
971 
972 static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
973 			       const unsigned char *addr)
974 {
975 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
976 				       HCLGEVF_MAC_ADDR_MC, addr);
977 }
978 
979 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
980 			      const unsigned char *addr)
981 {
982 	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
983 				       HCLGEVF_MAC_ADDR_MC, addr);
984 }
985 
986 static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
987 				    struct hclgevf_mac_addr_node *mac_node,
988 				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
989 {
990 	struct hclge_vf_to_pf_msg send_msg;
991 	u8 code, subcode;
992 
993 	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
994 		code = HCLGE_MBX_SET_UNICAST;
995 		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
996 			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
997 		else
998 			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
999 	} else {
1000 		code = HCLGE_MBX_SET_MULTICAST;
1001 		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
1002 			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
1003 		else
1004 			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
1005 	}
1006 
1007 	hclgevf_build_send_msg(&send_msg, code, subcode);
1008 	ether_addr_copy(send_msg.data, mac_node->mac_addr);
1009 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1010 }
1011 
1012 static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
1013 				    struct list_head *list,
1014 				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
1015 {
1016 	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
1017 	struct hclgevf_mac_addr_node *mac_node, *tmp;
1018 	int ret;
1019 
1020 	list_for_each_entry_safe(mac_node, tmp, list, node) {
1021 		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
1022 		if  (ret) {
1023 			hnae3_format_mac_addr(format_mac_addr,
1024 					      mac_node->mac_addr);
1025 			dev_err(&hdev->pdev->dev,
1026 				"failed to configure mac %s, state = %d, ret = %d\n",
1027 				format_mac_addr, mac_node->state, ret);
1028 			return;
1029 		}
1030 		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
1031 			mac_node->state = HCLGEVF_MAC_ACTIVE;
1032 		} else {
1033 			list_del(&mac_node->node);
1034 			kfree(mac_node);
1035 		}
1036 	}
1037 }
1038 
1039 static void hclgevf_sync_from_add_list(struct list_head *add_list,
1040 				       struct list_head *mac_list)
1041 {
1042 	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
1043 
1044 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
1045 		/* if the mac address from tmp_add_list is not in the
1046 		 * uc/mc_mac_list, it means have received a TO_DEL request
1047 		 * during the time window of sending mac config request to PF
1048 		 * If mac_node state is ACTIVE, then change its state to TO_DEL,
1049 		 * then it will be removed at next time. If is TO_ADD, it means
1050 		 * send TO_ADD request failed, so just remove the mac node.
1051 		 */
1052 		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
1053 		if (new_node) {
1054 			hclgevf_update_mac_node(new_node, mac_node->state);
1055 			list_del(&mac_node->node);
1056 			kfree(mac_node);
1057 		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
1058 			mac_node->state = HCLGEVF_MAC_TO_DEL;
1059 			list_move_tail(&mac_node->node, mac_list);
1060 		} else {
1061 			list_del(&mac_node->node);
1062 			kfree(mac_node);
1063 		}
1064 	}
1065 }
1066 
1067 static void hclgevf_sync_from_del_list(struct list_head *del_list,
1068 				       struct list_head *mac_list)
1069 {
1070 	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
1071 
1072 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
1073 		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
1074 		if (new_node) {
1075 			/* If the mac addr is exist in the mac list, it means
1076 			 * received a new request TO_ADD during the time window
1077 			 * of sending mac addr configurrequest to PF, so just
1078 			 * change the mac state to ACTIVE.
1079 			 */
1080 			new_node->state = HCLGEVF_MAC_ACTIVE;
1081 			list_del(&mac_node->node);
1082 			kfree(mac_node);
1083 		} else {
1084 			list_move_tail(&mac_node->node, mac_list);
1085 		}
1086 	}
1087 }
1088 
1089 static void hclgevf_clear_list(struct list_head *list)
1090 {
1091 	struct hclgevf_mac_addr_node *mac_node, *tmp;
1092 
1093 	list_for_each_entry_safe(mac_node, tmp, list, node) {
1094 		list_del(&mac_node->node);
1095 		kfree(mac_node);
1096 	}
1097 }
1098 
1099 static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
1100 				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
1101 {
1102 	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
1103 	struct list_head tmp_add_list, tmp_del_list;
1104 	struct list_head *list;
1105 
1106 	INIT_LIST_HEAD(&tmp_add_list);
1107 	INIT_LIST_HEAD(&tmp_del_list);
1108 
1109 	/* move the mac addr to the tmp_add_list and tmp_del_list, then
1110 	 * we can add/delete these mac addr outside the spin lock
1111 	 */
1112 	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
1113 		&hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
1114 
1115 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1116 
1117 	list_for_each_entry_safe(mac_node, tmp, list, node) {
1118 		switch (mac_node->state) {
1119 		case HCLGEVF_MAC_TO_DEL:
1120 			list_move_tail(&mac_node->node, &tmp_del_list);
1121 			break;
1122 		case HCLGEVF_MAC_TO_ADD:
1123 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
1124 			if (!new_node)
1125 				goto stop_traverse;
1126 
1127 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
1128 			new_node->state = mac_node->state;
1129 			list_add_tail(&new_node->node, &tmp_add_list);
1130 			break;
1131 		default:
1132 			break;
1133 		}
1134 	}
1135 
1136 stop_traverse:
1137 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1138 
1139 	/* delete first, in order to get max mac table space for adding */
1140 	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
1141 	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
1142 
1143 	/* if some mac addresses were added/deleted fail, move back to the
1144 	 * mac_list, and retry at next time.
1145 	 */
1146 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1147 
1148 	hclgevf_sync_from_del_list(&tmp_del_list, list);
1149 	hclgevf_sync_from_add_list(&tmp_add_list, list);
1150 
1151 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1152 }
1153 
1154 static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
1155 {
1156 	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
1157 	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
1158 }
1159 
1160 static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
1161 {
1162 	spin_lock_bh(&hdev->mac_table.mac_list_lock);
1163 
1164 	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
1165 	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
1166 
1167 	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
1168 }
1169 
1170 static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
1171 {
1172 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1173 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
1174 	struct hclge_vf_to_pf_msg send_msg;
1175 
1176 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
1177 		return -EOPNOTSUPP;
1178 
1179 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
1180 			       HCLGE_MBX_ENABLE_VLAN_FILTER);
1181 	send_msg.data[0] = enable ? 1 : 0;
1182 
1183 	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1184 }
1185 
1186 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
1187 				   __be16 proto, u16 vlan_id,
1188 				   bool is_kill)
1189 {
1190 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1191 	struct hclge_mbx_vlan_filter *vlan_filter;
1192 	struct hclge_vf_to_pf_msg send_msg;
1193 	int ret;
1194 
1195 	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
1196 		return -EINVAL;
1197 
1198 	if (proto != htons(ETH_P_8021Q))
1199 		return -EPROTONOSUPPORT;
1200 
1201 	/* When device is resetting or reset failed, firmware is unable to
1202 	 * handle mailbox. Just record the vlan id, and remove it after
1203 	 * reset finished.
1204 	 */
1205 	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
1206 	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
1207 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
1208 		return -EBUSY;
1209 	}
1210 
1211 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
1212 			       HCLGE_MBX_VLAN_FILTER);
1213 	vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
1214 	vlan_filter->is_kill = is_kill;
1215 	vlan_filter->vlan_id = cpu_to_le16(vlan_id);
1216 	vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));
1217 
1218 	/* when remove hw vlan filter failed, record the vlan id,
1219 	 * and try to remove it from hw later, to be consistence
1220 	 * with stack.
1221 	 */
1222 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1223 	if (is_kill && ret)
1224 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
1225 
1226 	return ret;
1227 }
1228 
1229 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
1230 {
1231 #define HCLGEVF_MAX_SYNC_COUNT	60
1232 	struct hnae3_handle *handle = &hdev->nic;
1233 	int ret, sync_cnt = 0;
1234 	u16 vlan_id;
1235 
1236 	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
1237 	while (vlan_id != VLAN_N_VID) {
1238 		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
1239 					      vlan_id, true);
1240 		if (ret)
1241 			return;
1242 
1243 		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
1244 		sync_cnt++;
1245 		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
1246 			return;
1247 
1248 		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
1249 	}
1250 }
1251 
1252 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
1253 {
1254 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1255 	struct hclge_vf_to_pf_msg send_msg;
1256 
1257 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
1258 			       HCLGE_MBX_VLAN_RX_OFF_CFG);
1259 	send_msg.data[0] = enable ? 1 : 0;
1260 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1261 }
1262 
1263 static int hclgevf_reset_tqp(struct hnae3_handle *handle)
1264 {
1265 #define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
1266 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1267 	struct hclge_vf_to_pf_msg send_msg;
1268 	u8 return_status = 0;
1269 	int ret;
1270 	u16 i;
1271 
1272 	/* disable vf queue before send queue reset msg to PF */
1273 	ret = hclgevf_tqp_enable(handle, false);
1274 	if (ret) {
1275 		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
1276 			ret);
1277 		return ret;
1278 	}
1279 
1280 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
1281 
1282 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
1283 				   sizeof(return_status));
1284 	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
1285 		return ret;
1286 
1287 	for (i = 1; i < handle->kinfo.num_tqps; i++) {
1288 		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
1289 		*(__le16 *)send_msg.data = cpu_to_le16(i);
1290 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1291 		if (ret)
1292 			return ret;
1293 	}
1294 
1295 	return 0;
1296 }
1297 
1298 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
1299 {
1300 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1301 	struct hclge_mbx_mtu_info *mtu_info;
1302 	struct hclge_vf_to_pf_msg send_msg;
1303 
1304 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
1305 	mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
1306 	mtu_info->mtu = cpu_to_le32(new_mtu);
1307 
1308 	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1309 }
1310 
1311 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
1312 				 enum hnae3_reset_notify_type type)
1313 {
1314 	struct hnae3_client *client = hdev->nic_client;
1315 	struct hnae3_handle *handle = &hdev->nic;
1316 	int ret;
1317 
1318 	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
1319 	    !client)
1320 		return 0;
1321 
1322 	if (!client->ops->reset_notify)
1323 		return -EOPNOTSUPP;
1324 
1325 	ret = client->ops->reset_notify(handle, type);
1326 	if (ret)
1327 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
1328 			type, ret);
1329 
1330 	return ret;
1331 }
1332 
1333 static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
1334 				      enum hnae3_reset_notify_type type)
1335 {
1336 	struct hnae3_client *client = hdev->roce_client;
1337 	struct hnae3_handle *handle = &hdev->roce;
1338 	int ret;
1339 
1340 	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
1341 		return 0;
1342 
1343 	if (!client->ops->reset_notify)
1344 		return -EOPNOTSUPP;
1345 
1346 	ret = client->ops->reset_notify(handle, type);
1347 	if (ret)
1348 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
1349 			type, ret);
1350 	return ret;
1351 }
1352 
1353 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
1354 {
1355 #define HCLGEVF_RESET_WAIT_US	20000
1356 #define HCLGEVF_RESET_WAIT_CNT	2000
1357 #define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
1358 	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
1359 
1360 	u32 val;
1361 	int ret;
1362 
1363 	if (hdev->reset_type == HNAE3_VF_RESET)
1364 		ret = readl_poll_timeout(hdev->hw.hw.io_base +
1365 					 HCLGEVF_VF_RST_ING, val,
1366 					 !(val & HCLGEVF_VF_RST_ING_BIT),
1367 					 HCLGEVF_RESET_WAIT_US,
1368 					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
1369 	else
1370 		ret = readl_poll_timeout(hdev->hw.hw.io_base +
1371 					 HCLGEVF_RST_ING, val,
1372 					 !(val & HCLGEVF_RST_ING_BITS),
1373 					 HCLGEVF_RESET_WAIT_US,
1374 					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
1375 
1376 	/* hardware completion status should be available by this time */
1377 	if (ret) {
1378 		dev_err(&hdev->pdev->dev,
1379 			"couldn't get reset done status from h/w, timeout!\n");
1380 		return ret;
1381 	}
1382 
1383 	/* we will wait a bit more to let reset of the stack to complete. This
1384 	 * might happen in case reset assertion was made by PF. Yes, this also
1385 	 * means we might end up waiting bit more even for VF reset.
1386 	 */
1387 	if (hdev->reset_type == HNAE3_VF_FULL_RESET)
1388 		msleep(5000);
1389 	else
1390 		msleep(500);
1391 
1392 	return 0;
1393 }
1394 
1395 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
1396 {
1397 	u32 reg_val;
1398 
1399 	reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
1400 	if (enable)
1401 		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
1402 	else
1403 		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
1404 
1405 	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
1406 			  reg_val);
1407 }
1408 
1409 static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
1410 {
1411 	int ret;
1412 
1413 	/* uninitialize the nic client */
1414 	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1415 	if (ret)
1416 		return ret;
1417 
1418 	/* re-initialize the hclge device */
1419 	ret = hclgevf_reset_hdev(hdev);
1420 	if (ret) {
1421 		dev_err(&hdev->pdev->dev,
1422 			"hclge device re-init failed, VF is disabled!\n");
1423 		return ret;
1424 	}
1425 
1426 	/* bring up the nic client again */
1427 	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1428 	if (ret)
1429 		return ret;
1430 
1431 	/* clear handshake status with IMP */
1432 	hclgevf_reset_handshake(hdev, false);
1433 
1434 	/* bring up the nic to enable TX/RX again */
1435 	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1436 }
1437 
1438 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
1439 {
1440 #define HCLGEVF_RESET_SYNC_TIME 100
1441 
1442 	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
1443 		struct hclge_vf_to_pf_msg send_msg;
1444 		int ret;
1445 
1446 		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
1447 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1448 		if (ret) {
1449 			dev_err(&hdev->pdev->dev,
1450 				"failed to assert VF reset, ret = %d\n", ret);
1451 			return ret;
1452 		}
1453 		hdev->rst_stats.vf_func_rst_cnt++;
1454 	}
1455 
1456 	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
1457 	/* inform hardware that preparatory work is done */
1458 	msleep(HCLGEVF_RESET_SYNC_TIME);
1459 	hclgevf_reset_handshake(hdev, true);
1460 	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
1461 		 hdev->reset_type);
1462 
1463 	return 0;
1464 }
1465 
1466 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
1467 {
1468 	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
1469 		 hdev->rst_stats.vf_func_rst_cnt);
1470 	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
1471 		 hdev->rst_stats.flr_rst_cnt);
1472 	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
1473 		 hdev->rst_stats.vf_rst_cnt);
1474 	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
1475 		 hdev->rst_stats.rst_done_cnt);
1476 	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
1477 		 hdev->rst_stats.hw_rst_done_cnt);
1478 	dev_info(&hdev->pdev->dev, "reset count: %u\n",
1479 		 hdev->rst_stats.rst_cnt);
1480 	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
1481 		 hdev->rst_stats.rst_fail_cnt);
1482 	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
1483 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
1484 	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
1485 		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
1486 	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
1487 		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
1488 	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
1489 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
1490 	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
1491 }
1492 
1493 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
1494 {
1495 	/* recover handshake status with IMP when reset fail */
1496 	hclgevf_reset_handshake(hdev, true);
1497 	hdev->rst_stats.rst_fail_cnt++;
1498 	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
1499 		hdev->rst_stats.rst_fail_cnt);
1500 
1501 	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
1502 		set_bit(hdev->reset_type, &hdev->reset_pending);
1503 
1504 	if (hclgevf_is_reset_pending(hdev)) {
1505 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1506 		hclgevf_reset_task_schedule(hdev);
1507 	} else {
1508 		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1509 		hclgevf_dump_rst_info(hdev);
1510 	}
1511 }
1512 
1513 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
1514 {
1515 	int ret;
1516 
1517 	hdev->rst_stats.rst_cnt++;
1518 
1519 	/* perform reset of the stack & ae device for a client */
1520 	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
1521 	if (ret)
1522 		return ret;
1523 
1524 	rtnl_lock();
1525 	/* bring down the nic to stop any ongoing TX/RX */
1526 	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
1527 	rtnl_unlock();
1528 	if (ret)
1529 		return ret;
1530 
1531 	return hclgevf_reset_prepare_wait(hdev);
1532 }
1533 
1534 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
1535 {
1536 	int ret;
1537 
1538 	hdev->rst_stats.hw_rst_done_cnt++;
1539 	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
1540 	if (ret)
1541 		return ret;
1542 
1543 	rtnl_lock();
1544 	/* now, re-initialize the nic client and ae device */
1545 	ret = hclgevf_reset_stack(hdev);
1546 	rtnl_unlock();
1547 	if (ret) {
1548 		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1549 		return ret;
1550 	}
1551 
1552 	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
1553 	/* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
1554 	 * times
1555 	 */
1556 	if (ret &&
1557 	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
1558 		return ret;
1559 
1560 	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
1561 	if (ret)
1562 		return ret;
1563 
1564 	hdev->last_reset_time = jiffies;
1565 	hdev->rst_stats.rst_done_cnt++;
1566 	hdev->rst_stats.rst_fail_cnt = 0;
1567 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1568 
1569 	return 0;
1570 }
1571 
1572 static void hclgevf_reset(struct hclgevf_dev *hdev)
1573 {
1574 	if (hclgevf_reset_prepare(hdev))
1575 		goto err_reset;
1576 
1577 	/* check if VF could successfully fetch the hardware reset completion
1578 	 * status from the hardware
1579 	 */
1580 	if (hclgevf_reset_wait(hdev)) {
1581 		/* can't do much in this situation, will disable VF */
1582 		dev_err(&hdev->pdev->dev,
1583 			"failed to fetch H/W reset completion status\n");
1584 		goto err_reset;
1585 	}
1586 
1587 	if (hclgevf_reset_rebuild(hdev))
1588 		goto err_reset;
1589 
1590 	return;
1591 
1592 err_reset:
1593 	hclgevf_reset_err_handle(hdev);
1594 }
1595 
1596 static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
1597 {
1598 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
1599 
1600 	/* return the highest priority reset level amongst all */
1601 	if (test_bit(HNAE3_VF_RESET, addr)) {
1602 		rst_level = HNAE3_VF_RESET;
1603 		clear_bit(HNAE3_VF_RESET, addr);
1604 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1605 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1606 	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
1607 		rst_level = HNAE3_VF_FULL_RESET;
1608 		clear_bit(HNAE3_VF_FULL_RESET, addr);
1609 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1610 	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
1611 		rst_level = HNAE3_VF_PF_FUNC_RESET;
1612 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1613 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1614 	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
1615 		rst_level = HNAE3_VF_FUNC_RESET;
1616 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1617 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
1618 		rst_level = HNAE3_FLR_RESET;
1619 		clear_bit(HNAE3_FLR_RESET, addr);
1620 	}
1621 
1622 	return rst_level;
1623 }
1624 
1625 static void hclgevf_reset_event(struct pci_dev *pdev,
1626 				struct hnae3_handle *handle)
1627 {
1628 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1629 	struct hclgevf_dev *hdev = ae_dev->priv;
1630 
1631 	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
1632 
1633 	if (hdev->default_reset_request)
1634 		hdev->reset_level =
1635 			hclgevf_get_reset_level(&hdev->default_reset_request);
1636 	else
1637 		hdev->reset_level = HNAE3_VF_FUNC_RESET;
1638 
1639 	/* reset of this VF requested */
1640 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
1641 	hclgevf_reset_task_schedule(hdev);
1642 
1643 	hdev->last_reset_time = jiffies;
1644 }
1645 
1646 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
1647 					  enum hnae3_reset_type rst_type)
1648 {
1649 	struct hclgevf_dev *hdev = ae_dev->priv;
1650 
1651 	set_bit(rst_type, &hdev->default_reset_request);
1652 }
1653 
1654 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1655 {
1656 	writel(en ? 1 : 0, vector->addr);
1657 }
1658 
1659 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
1660 					  enum hnae3_reset_type rst_type)
1661 {
1662 #define HCLGEVF_RESET_RETRY_WAIT_MS	500
1663 #define HCLGEVF_RESET_RETRY_CNT		5
1664 
1665 	struct hclgevf_dev *hdev = ae_dev->priv;
1666 	int retry_cnt = 0;
1667 	int ret;
1668 
1669 	while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
1670 		down(&hdev->reset_sem);
1671 		set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1672 		hdev->reset_type = rst_type;
1673 		ret = hclgevf_reset_prepare(hdev);
1674 		if (!ret && !hdev->reset_pending)
1675 			break;
1676 
1677 		dev_err(&hdev->pdev->dev,
1678 			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
1679 			ret, hdev->reset_pending, retry_cnt);
1680 		clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1681 		up(&hdev->reset_sem);
1682 		msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
1683 	}
1684 
1685 	/* disable misc vector before reset done */
1686 	hclgevf_enable_vector(&hdev->misc_vector, false);
1687 
1688 	if (hdev->reset_type == HNAE3_FLR_RESET)
1689 		hdev->rst_stats.flr_rst_cnt++;
1690 }
1691 
1692 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
1693 {
1694 	struct hclgevf_dev *hdev = ae_dev->priv;
1695 	int ret;
1696 
1697 	hclgevf_enable_vector(&hdev->misc_vector, true);
1698 
1699 	ret = hclgevf_reset_rebuild(hdev);
1700 	if (ret)
1701 		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
1702 			 ret);
1703 
1704 	hdev->reset_type = HNAE3_NONE_RESET;
1705 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1706 	up(&hdev->reset_sem);
1707 }
1708 
1709 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
1710 {
1711 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1712 
1713 	return hdev->fw_version;
1714 }
1715 
1716 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
1717 {
1718 	struct hclgevf_misc_vector *vector = &hdev->misc_vector;
1719 
1720 	vector->vector_irq = pci_irq_vector(hdev->pdev,
1721 					    HCLGEVF_MISC_VECTOR_NUM);
1722 	vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
1723 	/* vector status always valid for Vector 0 */
1724 	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
1725 	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
1726 
1727 	hdev->num_msi_left -= 1;
1728 	hdev->num_msi_used += 1;
1729 }
1730 
1731 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
1732 {
1733 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1734 	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
1735 	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
1736 			      &hdev->state))
1737 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
1738 }
1739 
1740 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
1741 {
1742 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1743 	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
1744 			      &hdev->state))
1745 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
1746 }
1747 
1748 static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
1749 				  unsigned long delay)
1750 {
1751 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1752 	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
1753 		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
1754 }
1755 
1756 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
1757 {
1758 #define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3
1759 
1760 	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
1761 		return;
1762 
1763 	down(&hdev->reset_sem);
1764 	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1765 
1766 	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
1767 			       &hdev->reset_state)) {
1768 		/* PF has intimated that it is about to reset the hardware.
1769 		 * We now have to poll & check if hardware has actually
1770 		 * completed the reset sequence. On hardware reset completion,
1771 		 * VF needs to reset the client and ae device.
1772 		 */
1773 		hdev->reset_attempts = 0;
1774 
1775 		hdev->last_reset_time = jiffies;
1776 		hdev->reset_type =
1777 			hclgevf_get_reset_level(&hdev->reset_pending);
1778 		if (hdev->reset_type != HNAE3_NONE_RESET)
1779 			hclgevf_reset(hdev);
1780 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
1781 				      &hdev->reset_state)) {
1782 		/* we could be here when either of below happens:
1783 		 * 1. reset was initiated due to watchdog timeout caused by
1784 		 *    a. IMP was earlier reset and our TX got choked down and
1785 		 *       which resulted in watchdog reacting and inducing VF
1786 		 *       reset. This also means our cmdq would be unreliable.
1787 		 *    b. problem in TX due to other lower layer(example link
1788 		 *       layer not functioning properly etc.)
1789 		 * 2. VF reset might have been initiated due to some config
1790 		 *    change.
1791 		 *
1792 		 * NOTE: Theres no clear way to detect above cases than to react
1793 		 * to the response of PF for this reset request. PF will ack the
1794 		 * 1b and 2. cases but we will not get any intimation about 1a
1795 		 * from PF as cmdq would be in unreliable state i.e. mailbox
1796 		 * communication between PF and VF would be broken.
1797 		 *
1798 		 * if we are never geting into pending state it means either:
1799 		 * 1. PF is not receiving our request which could be due to IMP
1800 		 *    reset
1801 		 * 2. PF is screwed
1802 		 * We cannot do much for 2. but to check first we can try reset
1803 		 * our PCIe + stack and see if it alleviates the problem.
1804 		 */
1805 		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
1806 			/* prepare for full reset of stack + pcie interface */
1807 			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
1808 
1809 			/* "defer" schedule the reset task again */
1810 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1811 		} else {
1812 			hdev->reset_attempts++;
1813 
1814 			set_bit(hdev->reset_level, &hdev->reset_pending);
1815 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1816 		}
1817 		hclgevf_reset_task_schedule(hdev);
1818 	}
1819 
1820 	hdev->reset_type = HNAE3_NONE_RESET;
1821 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1822 	up(&hdev->reset_sem);
1823 }
1824 
1825 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
1826 {
1827 	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
1828 		return;
1829 
1830 	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
1831 		return;
1832 
1833 	hclgevf_mbx_async_handler(hdev);
1834 
1835 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1836 }
1837 
1838 static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
1839 {
1840 	struct hclge_vf_to_pf_msg send_msg;
1841 	int ret;
1842 
1843 	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
1844 		return;
1845 
1846 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
1847 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1848 	if (ret)
1849 		dev_err(&hdev->pdev->dev,
1850 			"VF sends keep alive cmd failed(=%d)\n", ret);
1851 }
1852 
1853 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
1854 {
1855 	unsigned long delta = round_jiffies_relative(HZ);
1856 	struct hnae3_handle *handle = &hdev->nic;
1857 
1858 	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
1859 		return;
1860 
1861 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
1862 		delta = jiffies - hdev->last_serv_processed;
1863 
1864 		if (delta < round_jiffies_relative(HZ)) {
1865 			delta = round_jiffies_relative(HZ) - delta;
1866 			goto out;
1867 		}
1868 	}
1869 
1870 	hdev->serv_processed_cnt++;
1871 	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
1872 		hclgevf_keep_alive(hdev);
1873 
1874 	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
1875 		hdev->last_serv_processed = jiffies;
1876 		goto out;
1877 	}
1878 
1879 	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
1880 		hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
1881 
1882 	/* VF does not need to request link status when this bit is set, because
1883 	 * PF will push its link status to VFs when link status changed.
1884 	 */
1885 	if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
1886 		hclgevf_request_link_info(hdev);
1887 
1888 	hclgevf_update_link_mode(hdev);
1889 
1890 	hclgevf_sync_vlan_filter(hdev);
1891 
1892 	hclgevf_sync_mac_table(hdev);
1893 
1894 	hclgevf_sync_promisc_mode(hdev);
1895 
1896 	hdev->last_serv_processed = jiffies;
1897 
1898 out:
1899 	hclgevf_task_schedule(hdev, delta);
1900 }
1901 
1902 static void hclgevf_service_task(struct work_struct *work)
1903 {
1904 	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
1905 						service_task.work);
1906 
1907 	hclgevf_reset_service_task(hdev);
1908 	hclgevf_mailbox_service_task(hdev);
1909 	hclgevf_periodic_service_task(hdev);
1910 
1911 	/* Handle reset and mbx again in case periodical task delays the
1912 	 * handling by calling hclgevf_task_schedule() in
1913 	 * hclgevf_periodic_service_task()
1914 	 */
1915 	hclgevf_reset_service_task(hdev);
1916 	hclgevf_mailbox_service_task(hdev);
1917 }
1918 
1919 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
1920 {
1921 	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
1922 }
1923 
1924 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
1925 						      u32 *clearval)
1926 {
1927 	u32 val, cmdq_stat_reg, rst_ing_reg;
1928 
1929 	/* fetch the events from their corresponding regs */
1930 	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
1931 					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
1932 	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
1933 		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
1934 		dev_info(&hdev->pdev->dev,
1935 			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
1936 		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
1937 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1938 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
1939 		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
1940 		hdev->rst_stats.vf_rst_cnt++;
1941 		/* set up VF hardware reset status, its PF will clear
1942 		 * this status when PF has initialized done.
1943 		 */
1944 		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
1945 		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
1946 				  val | HCLGEVF_VF_RST_ING_BIT);
1947 		return HCLGEVF_VECTOR0_EVENT_RST;
1948 	}
1949 
1950 	/* check for vector0 mailbox(=CMDQ RX) event source */
1951 	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
1952 		/* for revision 0x21, clearing interrupt is writing bit 0
1953 		 * to the clear register, writing bit 1 means to keep the
1954 		 * old value.
1955 		 * for revision 0x20, the clear register is a read & write
1956 		 * register, so we should just write 0 to the bit we are
1957 		 * handling, and keep other bits as cmdq_stat_reg.
1958 		 */
1959 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
1960 			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
1961 		else
1962 			*clearval = cmdq_stat_reg &
1963 				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
1964 
1965 		return HCLGEVF_VECTOR0_EVENT_MBX;
1966 	}
1967 
1968 	/* print other vector0 event source */
1969 	dev_info(&hdev->pdev->dev,
1970 		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
1971 		 cmdq_stat_reg);
1972 
1973 	return HCLGEVF_VECTOR0_EVENT_OTHER;
1974 }
1975 
1976 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
1977 {
1978 	enum hclgevf_evt_cause event_cause;
1979 	struct hclgevf_dev *hdev = data;
1980 	u32 clearval;
1981 
1982 	hclgevf_enable_vector(&hdev->misc_vector, false);
1983 	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
1984 	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
1985 		hclgevf_clear_event_cause(hdev, clearval);
1986 
1987 	switch (event_cause) {
1988 	case HCLGEVF_VECTOR0_EVENT_RST:
1989 		hclgevf_reset_task_schedule(hdev);
1990 		break;
1991 	case HCLGEVF_VECTOR0_EVENT_MBX:
1992 		hclgevf_mbx_handler(hdev);
1993 		break;
1994 	default:
1995 		break;
1996 	}
1997 
1998 	hclgevf_enable_vector(&hdev->misc_vector, true);
1999 
2000 	return IRQ_HANDLED;
2001 }
2002 
2003 static int hclgevf_configure(struct hclgevf_dev *hdev)
2004 {
2005 	int ret;
2006 
2007 	hdev->gro_en = true;
2008 
2009 	ret = hclgevf_get_basic_info(hdev);
2010 	if (ret)
2011 		return ret;
2012 
2013 	/* get current port based vlan state from PF */
2014 	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
2015 	if (ret)
2016 		return ret;
2017 
2018 	/* get queue configuration from PF */
2019 	ret = hclgevf_get_queue_info(hdev);
2020 	if (ret)
2021 		return ret;
2022 
2023 	/* get queue depth info from PF */
2024 	ret = hclgevf_get_queue_depth(hdev);
2025 	if (ret)
2026 		return ret;
2027 
2028 	return hclgevf_get_pf_media_type(hdev);
2029 }
2030 
2031 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
2032 {
2033 	struct pci_dev *pdev = ae_dev->pdev;
2034 	struct hclgevf_dev *hdev;
2035 
2036 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
2037 	if (!hdev)
2038 		return -ENOMEM;
2039 
2040 	hdev->pdev = pdev;
2041 	hdev->ae_dev = ae_dev;
2042 	ae_dev->priv = hdev;
2043 
2044 	return 0;
2045 }
2046 
2047 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
2048 {
2049 	struct hnae3_handle *roce = &hdev->roce;
2050 	struct hnae3_handle *nic = &hdev->nic;
2051 
2052 	roce->rinfo.num_vectors = hdev->num_roce_msix;
2053 
2054 	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
2055 	    hdev->num_msi_left == 0)
2056 		return -EINVAL;
2057 
2058 	roce->rinfo.base_vector = hdev->roce_base_msix_offset;
2059 
2060 	roce->rinfo.netdev = nic->kinfo.netdev;
2061 	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2062 	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2063 
2064 	roce->pdev = nic->pdev;
2065 	roce->ae_algo = nic->ae_algo;
2066 	roce->numa_node_mask = nic->numa_node_mask;
2067 
2068 	return 0;
2069 }
2070 
2071 static int hclgevf_config_gro(struct hclgevf_dev *hdev)
2072 {
2073 	struct hclgevf_cfg_gro_status_cmd *req;
2074 	struct hclge_desc desc;
2075 	int ret;
2076 
2077 	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
2078 		return 0;
2079 
2080 	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
2081 				     false);
2082 	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
2083 
2084 	req->gro_en = hdev->gro_en ? 1 : 0;
2085 
2086 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2087 	if (ret)
2088 		dev_err(&hdev->pdev->dev,
2089 			"VF GRO hardware config cmd failed, ret = %d.\n", ret);
2090 
2091 	return ret;
2092 }
2093 
2094 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
2095 {
2096 	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
2097 	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
2098 	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
2099 	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
2100 	int ret;
2101 
2102 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2103 		ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
2104 						  rss_cfg->rss_algo,
2105 						  rss_cfg->rss_hash_key);
2106 		if (ret)
2107 			return ret;
2108 
2109 		ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, rss_cfg);
2110 		if (ret)
2111 			return ret;
2112 	}
2113 
2114 	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
2115 					     rss_cfg->rss_indirection_tbl);
2116 	if (ret)
2117 		return ret;
2118 
2119 	hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
2120 				   tc_offset, tc_valid, tc_size);
2121 
2122 	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
2123 					  tc_valid, tc_size);
2124 }
2125 
2126 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
2127 {
2128 	struct hnae3_handle *nic = &hdev->nic;
2129 	int ret;
2130 
2131 	ret = hclgevf_en_hw_strip_rxvtag(nic, true);
2132 	if (ret) {
2133 		dev_err(&hdev->pdev->dev,
2134 			"failed to enable rx vlan offload, ret = %d\n", ret);
2135 		return ret;
2136 	}
2137 
2138 	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
2139 				       false);
2140 }
2141 
2142 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
2143 {
2144 #define HCLGEVF_FLUSH_LINK_TIMEOUT	100000
2145 
2146 	unsigned long last = hdev->serv_processed_cnt;
2147 	int i = 0;
2148 
2149 	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
2150 	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
2151 	       last == hdev->serv_processed_cnt)
2152 		usleep_range(1, 1);
2153 }
2154 
2155 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
2156 {
2157 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2158 
2159 	if (enable) {
2160 		hclgevf_task_schedule(hdev, 0);
2161 	} else {
2162 		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2163 
2164 		/* flush memory to make sure DOWN is seen by service task */
2165 		smp_mb__before_atomic();
2166 		hclgevf_flush_link_update(hdev);
2167 	}
2168 }
2169 
2170 static int hclgevf_ae_start(struct hnae3_handle *handle)
2171 {
2172 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2173 
2174 	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2175 	clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
2176 
2177 	hclge_comm_reset_tqp_stats(handle);
2178 
2179 	hclgevf_request_link_info(hdev);
2180 
2181 	hclgevf_update_link_mode(hdev);
2182 
2183 	return 0;
2184 }
2185 
2186 static void hclgevf_ae_stop(struct hnae3_handle *handle)
2187 {
2188 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2189 
2190 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2191 
2192 	if (hdev->reset_type != HNAE3_VF_RESET)
2193 		hclgevf_reset_tqp(handle);
2194 
2195 	hclge_comm_reset_tqp_stats(handle);
2196 	hclgevf_update_link_status(hdev, 0);
2197 }
2198 
2199 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
2200 {
2201 #define HCLGEVF_STATE_ALIVE	1
2202 #define HCLGEVF_STATE_NOT_ALIVE	0
2203 
2204 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2205 	struct hclge_vf_to_pf_msg send_msg;
2206 
2207 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
2208 	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
2209 				HCLGEVF_STATE_NOT_ALIVE;
2210 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2211 }
2212 
2213 static int hclgevf_client_start(struct hnae3_handle *handle)
2214 {
2215 	return hclgevf_set_alive(handle, true);
2216 }
2217 
2218 static void hclgevf_client_stop(struct hnae3_handle *handle)
2219 {
2220 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2221 	int ret;
2222 
2223 	ret = hclgevf_set_alive(handle, false);
2224 	if (ret)
2225 		dev_warn(&hdev->pdev->dev,
2226 			 "%s failed %d\n", __func__, ret);
2227 }
2228 
2229 static void hclgevf_state_init(struct hclgevf_dev *hdev)
2230 {
2231 	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
2232 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2233 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
2234 
2235 	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
2236 
2237 	mutex_init(&hdev->mbx_resp.mbx_mutex);
2238 	sema_init(&hdev->reset_sem, 1);
2239 
2240 	spin_lock_init(&hdev->mac_table.mac_list_lock);
2241 	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
2242 	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
2243 
2244 	/* bring the device down */
2245 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2246 }
2247 
2248 static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
2249 {
2250 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2251 	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
2252 
2253 	if (hdev->service_task.work.func)
2254 		cancel_delayed_work_sync(&hdev->service_task);
2255 
2256 	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
2257 }
2258 
2259 static int hclgevf_init_msi(struct hclgevf_dev *hdev)
2260 {
2261 	struct pci_dev *pdev = hdev->pdev;
2262 	int vectors;
2263 	int i;
2264 
2265 	if (hnae3_dev_roce_supported(hdev))
2266 		vectors = pci_alloc_irq_vectors(pdev,
2267 						hdev->roce_base_msix_offset + 1,
2268 						hdev->num_msi,
2269 						PCI_IRQ_MSIX);
2270 	else
2271 		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2272 						hdev->num_msi,
2273 						PCI_IRQ_MSI | PCI_IRQ_MSIX);
2274 
2275 	if (vectors < 0) {
2276 		dev_err(&pdev->dev,
2277 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2278 			vectors);
2279 		return vectors;
2280 	}
2281 	if (vectors < hdev->num_msi)
2282 		dev_warn(&hdev->pdev->dev,
2283 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2284 			 hdev->num_msi, vectors);
2285 
2286 	hdev->num_msi = vectors;
2287 	hdev->num_msi_left = vectors;
2288 
2289 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2290 					   sizeof(u16), GFP_KERNEL);
2291 	if (!hdev->vector_status) {
2292 		pci_free_irq_vectors(pdev);
2293 		return -ENOMEM;
2294 	}
2295 
2296 	for (i = 0; i < hdev->num_msi; i++)
2297 		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
2298 
2299 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2300 					sizeof(int), GFP_KERNEL);
2301 	if (!hdev->vector_irq) {
2302 		devm_kfree(&pdev->dev, hdev->vector_status);
2303 		pci_free_irq_vectors(pdev);
2304 		return -ENOMEM;
2305 	}
2306 
2307 	return 0;
2308 }
2309 
2310 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
2311 {
2312 	struct pci_dev *pdev = hdev->pdev;
2313 
2314 	devm_kfree(&pdev->dev, hdev->vector_status);
2315 	devm_kfree(&pdev->dev, hdev->vector_irq);
2316 	pci_free_irq_vectors(pdev);
2317 }
2318 
2319 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
2320 {
2321 	int ret;
2322 
2323 	hclgevf_get_misc_vector(hdev);
2324 
2325 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
2326 		 HCLGEVF_NAME, pci_name(hdev->pdev));
2327 	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
2328 			  0, hdev->misc_vector.name, hdev);
2329 	if (ret) {
2330 		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
2331 			hdev->misc_vector.vector_irq);
2332 		return ret;
2333 	}
2334 
2335 	hclgevf_clear_event_cause(hdev, 0);
2336 
2337 	/* enable misc. vector(vector 0) */
2338 	hclgevf_enable_vector(&hdev->misc_vector, true);
2339 
2340 	return ret;
2341 }
2342 
2343 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
2344 {
2345 	/* disable misc vector(vector 0) */
2346 	hclgevf_enable_vector(&hdev->misc_vector, false);
2347 	synchronize_irq(hdev->misc_vector.vector_irq);
2348 	free_irq(hdev->misc_vector.vector_irq, hdev);
2349 	hclgevf_free_vector(hdev, 0);
2350 }
2351 
2352 static void hclgevf_info_show(struct hclgevf_dev *hdev)
2353 {
2354 	struct device *dev = &hdev->pdev->dev;
2355 
2356 	dev_info(dev, "VF info begin:\n");
2357 
2358 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
2359 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
2360 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
2361 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
2362 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
2363 	dev_info(dev, "PF media type of this VF: %u\n",
2364 		 hdev->hw.mac.media_type);
2365 
2366 	dev_info(dev, "VF info end.\n");
2367 }
2368 
2369 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
2370 					    struct hnae3_client *client)
2371 {
2372 	struct hclgevf_dev *hdev = ae_dev->priv;
2373 	int rst_cnt = hdev->rst_stats.rst_cnt;
2374 	int ret;
2375 
2376 	ret = client->ops->init_instance(&hdev->nic);
2377 	if (ret)
2378 		return ret;
2379 
2380 	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2381 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
2382 	    rst_cnt != hdev->rst_stats.rst_cnt) {
2383 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2384 
2385 		client->ops->uninit_instance(&hdev->nic, 0);
2386 		return -EBUSY;
2387 	}
2388 
2389 	hnae3_set_client_init_flag(client, ae_dev, 1);
2390 
2391 	if (netif_msg_drv(&hdev->nic))
2392 		hclgevf_info_show(hdev);
2393 
2394 	return 0;
2395 }
2396 
2397 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
2398 					     struct hnae3_client *client)
2399 {
2400 	struct hclgevf_dev *hdev = ae_dev->priv;
2401 	int ret;
2402 
2403 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
2404 	    !hdev->nic_client)
2405 		return 0;
2406 
2407 	ret = hclgevf_init_roce_base_info(hdev);
2408 	if (ret)
2409 		return ret;
2410 
2411 	ret = client->ops->init_instance(&hdev->roce);
2412 	if (ret)
2413 		return ret;
2414 
2415 	set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2416 	hnae3_set_client_init_flag(client, ae_dev, 1);
2417 
2418 	return 0;
2419 }
2420 
2421 static int hclgevf_init_client_instance(struct hnae3_client *client,
2422 					struct hnae3_ae_dev *ae_dev)
2423 {
2424 	struct hclgevf_dev *hdev = ae_dev->priv;
2425 	int ret;
2426 
2427 	switch (client->type) {
2428 	case HNAE3_CLIENT_KNIC:
2429 		hdev->nic_client = client;
2430 		hdev->nic.client = client;
2431 
2432 		ret = hclgevf_init_nic_client_instance(ae_dev, client);
2433 		if (ret)
2434 			goto clear_nic;
2435 
2436 		ret = hclgevf_init_roce_client_instance(ae_dev,
2437 							hdev->roce_client);
2438 		if (ret)
2439 			goto clear_roce;
2440 
2441 		break;
2442 	case HNAE3_CLIENT_ROCE:
2443 		if (hnae3_dev_roce_supported(hdev)) {
2444 			hdev->roce_client = client;
2445 			hdev->roce.client = client;
2446 		}
2447 
2448 		ret = hclgevf_init_roce_client_instance(ae_dev, client);
2449 		if (ret)
2450 			goto clear_roce;
2451 
2452 		break;
2453 	default:
2454 		return -EINVAL;
2455 	}
2456 
2457 	return 0;
2458 
2459 clear_nic:
2460 	hdev->nic_client = NULL;
2461 	hdev->nic.client = NULL;
2462 	return ret;
2463 clear_roce:
2464 	hdev->roce_client = NULL;
2465 	hdev->roce.client = NULL;
2466 	return ret;
2467 }
2468 
2469 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
2470 					   struct hnae3_ae_dev *ae_dev)
2471 {
2472 	struct hclgevf_dev *hdev = ae_dev->priv;
2473 
2474 	/* un-init roce, if it exists */
2475 	if (hdev->roce_client) {
2476 		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2477 			msleep(HCLGEVF_WAIT_RESET_DONE);
2478 		clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2479 
2480 		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
2481 		hdev->roce_client = NULL;
2482 		hdev->roce.client = NULL;
2483 	}
2484 
2485 	/* un-init nic/unic, if this was not called by roce client */
2486 	if (client->ops->uninit_instance && hdev->nic_client &&
2487 	    client->type != HNAE3_CLIENT_ROCE) {
2488 		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2489 			msleep(HCLGEVF_WAIT_RESET_DONE);
2490 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2491 
2492 		client->ops->uninit_instance(&hdev->nic, 0);
2493 		hdev->nic_client = NULL;
2494 		hdev->nic.client = NULL;
2495 	}
2496 }
2497 
2498 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
2499 {
2500 	struct pci_dev *pdev = hdev->pdev;
2501 	struct hclgevf_hw *hw = &hdev->hw;
2502 
2503 	/* for device does not have device memory, return directly */
2504 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
2505 		return 0;
2506 
2507 	hw->hw.mem_base =
2508 		devm_ioremap_wc(&pdev->dev,
2509 				pci_resource_start(pdev, HCLGEVF_MEM_BAR),
2510 				pci_resource_len(pdev, HCLGEVF_MEM_BAR));
2511 	if (!hw->hw.mem_base) {
2512 		dev_err(&pdev->dev, "failed to map device memory\n");
2513 		return -EFAULT;
2514 	}
2515 
2516 	return 0;
2517 }
2518 
2519 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
2520 {
2521 	struct pci_dev *pdev = hdev->pdev;
2522 	struct hclgevf_hw *hw;
2523 	int ret;
2524 
2525 	ret = pci_enable_device(pdev);
2526 	if (ret) {
2527 		dev_err(&pdev->dev, "failed to enable PCI device\n");
2528 		return ret;
2529 	}
2530 
2531 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2532 	if (ret) {
2533 		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
2534 		goto err_disable_device;
2535 	}
2536 
2537 	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
2538 	if (ret) {
2539 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
2540 		goto err_disable_device;
2541 	}
2542 
2543 	pci_set_master(pdev);
2544 	hw = &hdev->hw;
2545 	hw->hw.io_base = pci_iomap(pdev, 2, 0);
2546 	if (!hw->hw.io_base) {
2547 		dev_err(&pdev->dev, "can't map configuration register space\n");
2548 		ret = -ENOMEM;
2549 		goto err_release_regions;
2550 	}
2551 
2552 	ret = hclgevf_dev_mem_map(hdev);
2553 	if (ret)
2554 		goto err_unmap_io_base;
2555 
2556 	return 0;
2557 
2558 err_unmap_io_base:
2559 	pci_iounmap(pdev, hdev->hw.hw.io_base);
2560 err_release_regions:
2561 	pci_release_regions(pdev);
2562 err_disable_device:
2563 	pci_disable_device(pdev);
2564 
2565 	return ret;
2566 }
2567 
2568 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
2569 {
2570 	struct pci_dev *pdev = hdev->pdev;
2571 
2572 	if (hdev->hw.hw.mem_base)
2573 		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
2574 
2575 	pci_iounmap(pdev, hdev->hw.hw.io_base);
2576 	pci_release_regions(pdev);
2577 	pci_disable_device(pdev);
2578 }
2579 
2580 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
2581 {
2582 	struct hclgevf_query_res_cmd *req;
2583 	struct hclge_desc desc;
2584 	int ret;
2585 
2586 	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
2587 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2588 	if (ret) {
2589 		dev_err(&hdev->pdev->dev,
2590 			"query vf resource failed, ret = %d.\n", ret);
2591 		return ret;
2592 	}
2593 
2594 	req = (struct hclgevf_query_res_cmd *)desc.data;
2595 
2596 	if (hnae3_dev_roce_supported(hdev)) {
2597 		hdev->roce_base_msix_offset =
2598 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
2599 				HCLGEVF_MSIX_OFT_ROCEE_M,
2600 				HCLGEVF_MSIX_OFT_ROCEE_S);
2601 		hdev->num_roce_msix =
2602 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2603 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2604 
2605 		/* nic's msix numbers is always equals to the roce's. */
2606 		hdev->num_nic_msix = hdev->num_roce_msix;
2607 
2608 		/* VF should have NIC vectors and Roce vectors, NIC vectors
2609 		 * are queued before Roce vectors. The offset is fixed to 64.
2610 		 */
2611 		hdev->num_msi = hdev->num_roce_msix +
2612 				hdev->roce_base_msix_offset;
2613 	} else {
2614 		hdev->num_msi =
2615 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2616 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2617 
2618 		hdev->num_nic_msix = hdev->num_msi;
2619 	}
2620 
2621 	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
2622 		dev_err(&hdev->pdev->dev,
2623 			"Just %u msi resources, not enough for vf(min:2).\n",
2624 			hdev->num_nic_msix);
2625 		return -EINVAL;
2626 	}
2627 
2628 	return 0;
2629 }
2630 
2631 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
2632 {
2633 #define HCLGEVF_MAX_NON_TSO_BD_NUM			8U
2634 
2635 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2636 
2637 	ae_dev->dev_specs.max_non_tso_bd_num =
2638 					HCLGEVF_MAX_NON_TSO_BD_NUM;
2639 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
2640 	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2641 	ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2642 	ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
2643 }
2644 
2645 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
2646 				    struct hclge_desc *desc)
2647 {
2648 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2649 	struct hclgevf_dev_specs_0_cmd *req0;
2650 	struct hclgevf_dev_specs_1_cmd *req1;
2651 
2652 	req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
2653 	req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
2654 
2655 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
2656 	ae_dev->dev_specs.rss_ind_tbl_size =
2657 					le16_to_cpu(req0->rss_ind_tbl_size);
2658 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
2659 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
2660 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
2661 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
2662 }
2663 
2664 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
2665 {
2666 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
2667 
2668 	if (!dev_specs->max_non_tso_bd_num)
2669 		dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
2670 	if (!dev_specs->rss_ind_tbl_size)
2671 		dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
2672 	if (!dev_specs->rss_key_size)
2673 		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2674 	if (!dev_specs->max_int_gl)
2675 		dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2676 	if (!dev_specs->max_frm_size)
2677 		dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
2678 }
2679 
2680 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
2681 {
2682 	struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
2683 	int ret;
2684 	int i;
2685 
2686 	/* set default specifications as devices lower than version V3 do not
2687 	 * support querying specifications from firmware.
2688 	 */
2689 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
2690 		hclgevf_set_default_dev_specs(hdev);
2691 		return 0;
2692 	}
2693 
2694 	for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
2695 		hclgevf_cmd_setup_basic_desc(&desc[i],
2696 					     HCLGE_OPC_QUERY_DEV_SPECS, true);
2697 		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2698 	}
2699 	hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
2700 
2701 	ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
2702 	if (ret)
2703 		return ret;
2704 
2705 	hclgevf_parse_dev_specs(hdev, desc);
2706 	hclgevf_check_dev_specs(hdev);
2707 
2708 	return 0;
2709 }
2710 
2711 static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
2712 {
2713 	struct pci_dev *pdev = hdev->pdev;
2714 	int ret = 0;
2715 
2716 	if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
2717 	     hdev->reset_type == HNAE3_FLR_RESET) &&
2718 	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2719 		hclgevf_misc_irq_uninit(hdev);
2720 		hclgevf_uninit_msi(hdev);
2721 		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2722 	}
2723 
2724 	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2725 		pci_set_master(pdev);
2726 		ret = hclgevf_init_msi(hdev);
2727 		if (ret) {
2728 			dev_err(&pdev->dev,
2729 				"failed(%d) to init MSI/MSI-X\n", ret);
2730 			return ret;
2731 		}
2732 
2733 		ret = hclgevf_misc_irq_init(hdev);
2734 		if (ret) {
2735 			hclgevf_uninit_msi(hdev);
2736 			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
2737 				ret);
2738 			return ret;
2739 		}
2740 
2741 		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2742 	}
2743 
2744 	return ret;
2745 }
2746 
2747 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
2748 {
2749 	struct hclge_vf_to_pf_msg send_msg;
2750 
2751 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
2752 			       HCLGE_MBX_VPORT_LIST_CLEAR);
2753 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2754 }
2755 
2756 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
2757 {
2758 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
2759 		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
2760 }
2761 
2762 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
2763 {
2764 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
2765 		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
2766 }
2767 
2768 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
2769 {
2770 	struct pci_dev *pdev = hdev->pdev;
2771 	int ret;
2772 
2773 	ret = hclgevf_pci_reset(hdev);
2774 	if (ret) {
2775 		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
2776 		return ret;
2777 	}
2778 
2779 	hclgevf_arq_init(hdev);
2780 	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
2781 				  &hdev->fw_version, false,
2782 				  hdev->reset_pending);
2783 	if (ret) {
2784 		dev_err(&pdev->dev, "cmd failed %d\n", ret);
2785 		return ret;
2786 	}
2787 
2788 	ret = hclgevf_rss_init_hw(hdev);
2789 	if (ret) {
2790 		dev_err(&hdev->pdev->dev,
2791 			"failed(%d) to initialize RSS\n", ret);
2792 		return ret;
2793 	}
2794 
2795 	ret = hclgevf_config_gro(hdev);
2796 	if (ret)
2797 		return ret;
2798 
2799 	ret = hclgevf_init_vlan_config(hdev);
2800 	if (ret) {
2801 		dev_err(&hdev->pdev->dev,
2802 			"failed(%d) to initialize VLAN config\n", ret);
2803 		return ret;
2804 	}
2805 
2806 	/* get current port based vlan state from PF */
2807 	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
2808 	if (ret)
2809 		return ret;
2810 
2811 	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
2812 
2813 	hclgevf_init_rxd_adv_layout(hdev);
2814 
2815 	dev_info(&hdev->pdev->dev, "Reset done\n");
2816 
2817 	return 0;
2818 }
2819 
2820 static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
2821 {
2822 	struct pci_dev *pdev = hdev->pdev;
2823 	int ret;
2824 
2825 	ret = hclgevf_pci_init(hdev);
2826 	if (ret)
2827 		return ret;
2828 
2829 	ret = hclgevf_devlink_init(hdev);
2830 	if (ret)
2831 		goto err_devlink_init;
2832 
2833 	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
2834 	if (ret)
2835 		goto err_cmd_queue_init;
2836 
2837 	hclgevf_arq_init(hdev);
2838 	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
2839 				  &hdev->fw_version, false,
2840 				  hdev->reset_pending);
2841 	if (ret)
2842 		goto err_cmd_init;
2843 
2844 	/* Get vf resource */
2845 	ret = hclgevf_query_vf_resource(hdev);
2846 	if (ret)
2847 		goto err_cmd_init;
2848 
2849 	ret = hclgevf_query_dev_specs(hdev);
2850 	if (ret) {
2851 		dev_err(&pdev->dev,
2852 			"failed to query dev specifications, ret = %d\n", ret);
2853 		goto err_cmd_init;
2854 	}
2855 
2856 	ret = hclgevf_init_msi(hdev);
2857 	if (ret) {
2858 		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
2859 		goto err_cmd_init;
2860 	}
2861 
2862 	hclgevf_state_init(hdev);
2863 	hdev->reset_level = HNAE3_VF_FUNC_RESET;
2864 	hdev->reset_type = HNAE3_NONE_RESET;
2865 
2866 	ret = hclgevf_misc_irq_init(hdev);
2867 	if (ret)
2868 		goto err_misc_irq_init;
2869 
2870 	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2871 
2872 	ret = hclgevf_configure(hdev);
2873 	if (ret) {
2874 		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
2875 		goto err_config;
2876 	}
2877 
2878 	ret = hclgevf_alloc_tqps(hdev);
2879 	if (ret) {
2880 		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
2881 		goto err_config;
2882 	}
2883 
2884 	ret = hclgevf_set_handle_info(hdev);
2885 	if (ret)
2886 		goto err_config;
2887 
2888 	ret = hclgevf_config_gro(hdev);
2889 	if (ret)
2890 		goto err_config;
2891 
2892 	/* Initialize RSS for this VF */
2893 	ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
2894 				      &hdev->rss_cfg);
2895 	if (ret) {
2896 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
2897 		goto err_config;
2898 	}
2899 
2900 	ret = hclgevf_rss_init_hw(hdev);
2901 	if (ret) {
2902 		dev_err(&hdev->pdev->dev,
2903 			"failed(%d) to initialize RSS\n", ret);
2904 		goto err_config;
2905 	}
2906 
2907 	/* ensure vf tbl list as empty before init */
2908 	ret = hclgevf_clear_vport_list(hdev);
2909 	if (ret) {
2910 		dev_err(&pdev->dev,
2911 			"failed to clear tbl list configuration, ret = %d.\n",
2912 			ret);
2913 		goto err_config;
2914 	}
2915 
2916 	ret = hclgevf_init_vlan_config(hdev);
2917 	if (ret) {
2918 		dev_err(&hdev->pdev->dev,
2919 			"failed(%d) to initialize VLAN config\n", ret);
2920 		goto err_config;
2921 	}
2922 
2923 	hclgevf_init_rxd_adv_layout(hdev);
2924 
2925 	set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
2926 
2927 	hdev->last_reset_time = jiffies;
2928 	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
2929 		 HCLGEVF_DRIVER_NAME);
2930 
2931 	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
2932 
2933 	return 0;
2934 
2935 err_config:
2936 	hclgevf_misc_irq_uninit(hdev);
2937 err_misc_irq_init:
2938 	hclgevf_state_uninit(hdev);
2939 	hclgevf_uninit_msi(hdev);
2940 err_cmd_init:
2941 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
2942 err_cmd_queue_init:
2943 	hclgevf_devlink_uninit(hdev);
2944 err_devlink_init:
2945 	hclgevf_pci_uninit(hdev);
2946 	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2947 	return ret;
2948 }
2949 
2950 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
2951 {
2952 	struct hclge_vf_to_pf_msg send_msg;
2953 
2954 	hclgevf_state_uninit(hdev);
2955 	hclgevf_uninit_rxd_adv_layout(hdev);
2956 
2957 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
2958 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2959 
2960 	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2961 		hclgevf_misc_irq_uninit(hdev);
2962 		hclgevf_uninit_msi(hdev);
2963 	}
2964 
2965 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
2966 	hclgevf_devlink_uninit(hdev);
2967 	hclgevf_pci_uninit(hdev);
2968 	hclgevf_uninit_mac_list(hdev);
2969 }
2970 
2971 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
2972 {
2973 	struct pci_dev *pdev = ae_dev->pdev;
2974 	int ret;
2975 
2976 	ret = hclgevf_alloc_hdev(ae_dev);
2977 	if (ret) {
2978 		dev_err(&pdev->dev, "hclge device allocation failed\n");
2979 		return ret;
2980 	}
2981 
2982 	ret = hclgevf_init_hdev(ae_dev->priv);
2983 	if (ret) {
2984 		dev_err(&pdev->dev, "hclge device initialization failed\n");
2985 		return ret;
2986 	}
2987 
2988 	return 0;
2989 }
2990 
2991 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
2992 {
2993 	struct hclgevf_dev *hdev = ae_dev->priv;
2994 
2995 	hclgevf_uninit_hdev(hdev);
2996 	ae_dev->priv = NULL;
2997 }
2998 
2999 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
3000 {
3001 	struct hnae3_handle *nic = &hdev->nic;
3002 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
3003 
3004 	return min_t(u32, hdev->rss_size_max,
3005 		     hdev->num_tqps / kinfo->tc_info.num_tc);
3006 }
3007 
3008 /**
3009  * hclgevf_get_channels - Get the current channels enabled and max supported.
3010  * @handle: hardware information for network interface
3011  * @ch: ethtool channels structure
3012  *
3013  * We don't support separate tx and rx queues as channels. The other count
3014  * represents how many queues are being used for control. max_combined counts
3015  * how many queue pairs we can support. They may not be mapped 1 to 1 with
3016  * q_vectors since we support a lot more queue pairs than q_vectors.
3017  **/
3018 static void hclgevf_get_channels(struct hnae3_handle *handle,
3019 				 struct ethtool_channels *ch)
3020 {
3021 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3022 
3023 	ch->max_combined = hclgevf_get_max_channels(hdev);
3024 	ch->other_count = 0;
3025 	ch->max_other = 0;
3026 	ch->combined_count = handle->kinfo.rss_size;
3027 }
3028 
3029 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
3030 					  u16 *alloc_tqps, u16 *max_rss_size)
3031 {
3032 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3033 
3034 	*alloc_tqps = hdev->num_tqps;
3035 	*max_rss_size = hdev->rss_size_max;
3036 }
3037 
3038 static void hclgevf_update_rss_size(struct hnae3_handle *handle,
3039 				    u32 new_tqps_num)
3040 {
3041 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3042 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3043 	u16 max_rss_size;
3044 
3045 	kinfo->req_rss_size = new_tqps_num;
3046 
3047 	max_rss_size = min_t(u16, hdev->rss_size_max,
3048 			     hdev->num_tqps / kinfo->tc_info.num_tc);
3049 
3050 	/* Use the user's configuration when it is not larger than
3051 	 * max_rss_size, otherwise, use the maximum specification value.
3052 	 */
3053 	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
3054 	    kinfo->req_rss_size <= max_rss_size)
3055 		kinfo->rss_size = kinfo->req_rss_size;
3056 	else if (kinfo->rss_size > max_rss_size ||
3057 		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
3058 		kinfo->rss_size = max_rss_size;
3059 
3060 	kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
3061 }
3062 
3063 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
3064 				bool rxfh_configured)
3065 {
3066 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3067 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3068 	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
3069 	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
3070 	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
3071 	u16 cur_rss_size = kinfo->rss_size;
3072 	u16 cur_tqps = kinfo->num_tqps;
3073 	u32 *rss_indir;
3074 	unsigned int i;
3075 	int ret;
3076 
3077 	hclgevf_update_rss_size(handle, new_tqps_num);
3078 
3079 	hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
3080 				   tc_offset, tc_valid, tc_size);
3081 	ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
3082 					 tc_valid, tc_size);
3083 	if (ret)
3084 		return ret;
3085 
3086 	/* RSS indirection table has been configured by user */
3087 	if (rxfh_configured)
3088 		goto out;
3089 
3090 	/* Reinitializes the rss indirect table according to the new RSS size */
3091 	rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
3092 			    sizeof(u32), GFP_KERNEL);
3093 	if (!rss_indir)
3094 		return -ENOMEM;
3095 
3096 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
3097 		rss_indir[i] = i % kinfo->rss_size;
3098 
3099 	hdev->rss_cfg.rss_size = kinfo->rss_size;
3100 
3101 	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
3102 	if (ret)
3103 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
3104 			ret);
3105 
3106 	kfree(rss_indir);
3107 
3108 out:
3109 	if (!ret)
3110 		dev_info(&hdev->pdev->dev,
3111 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
3112 			 cur_rss_size, kinfo->rss_size,
3113 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
3114 
3115 	return ret;
3116 }
3117 
3118 static int hclgevf_get_status(struct hnae3_handle *handle)
3119 {
3120 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3121 
3122 	return hdev->hw.mac.link;
3123 }
3124 
3125 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
3126 					    u8 *auto_neg, u32 *speed,
3127 					    u8 *duplex, u32 *lane_num)
3128 {
3129 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3130 
3131 	if (speed)
3132 		*speed = hdev->hw.mac.speed;
3133 	if (duplex)
3134 		*duplex = hdev->hw.mac.duplex;
3135 	if (auto_neg)
3136 		*auto_neg = AUTONEG_DISABLE;
3137 }
3138 
3139 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
3140 				 u8 duplex)
3141 {
3142 	hdev->hw.mac.speed = speed;
3143 	hdev->hw.mac.duplex = duplex;
3144 }
3145 
3146 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
3147 {
3148 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3149 	bool gro_en_old = hdev->gro_en;
3150 	int ret;
3151 
3152 	hdev->gro_en = enable;
3153 	ret = hclgevf_config_gro(hdev);
3154 	if (ret)
3155 		hdev->gro_en = gro_en_old;
3156 
3157 	return ret;
3158 }
3159 
3160 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
3161 				   u8 *module_type)
3162 {
3163 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3164 
3165 	if (media_type)
3166 		*media_type = hdev->hw.mac.media_type;
3167 
3168 	if (module_type)
3169 		*module_type = hdev->hw.mac.module_type;
3170 }
3171 
3172 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
3173 {
3174 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3175 
3176 	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
3177 }
3178 
3179 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
3180 {
3181 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3182 
3183 	return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3184 }
3185 
3186 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
3187 {
3188 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3189 
3190 	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
3191 }
3192 
3193 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
3194 {
3195 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3196 
3197 	return hdev->rst_stats.hw_rst_done_cnt;
3198 }
3199 
3200 static void hclgevf_get_link_mode(struct hnae3_handle *handle,
3201 				  unsigned long *supported,
3202 				  unsigned long *advertising)
3203 {
3204 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3205 
3206 	*supported = hdev->hw.mac.supported;
3207 	*advertising = hdev->hw.mac.advertising;
3208 }
3209 
3210 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
3211 				struct hclge_mbx_port_base_vlan *port_base_vlan)
3212 {
3213 	struct hnae3_handle *nic = &hdev->nic;
3214 	struct hclge_vf_to_pf_msg send_msg;
3215 	int ret;
3216 
3217 	rtnl_lock();
3218 
3219 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
3220 	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
3221 		dev_warn(&hdev->pdev->dev,
3222 			 "is resetting when updating port based vlan info\n");
3223 		rtnl_unlock();
3224 		return;
3225 	}
3226 
3227 	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
3228 	if (ret) {
3229 		rtnl_unlock();
3230 		return;
3231 	}
3232 
3233 	/* send msg to PF and wait update port based vlan info */
3234 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
3235 			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
3236 	memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
3237 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3238 	if (!ret) {
3239 		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
3240 			nic->port_base_vlan_state = state;
3241 		else
3242 			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
3243 	}
3244 
3245 	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
3246 	rtnl_unlock();
3247 }
3248 
3249 static const struct hnae3_ae_ops hclgevf_ops = {
3250 	.init_ae_dev = hclgevf_init_ae_dev,
3251 	.uninit_ae_dev = hclgevf_uninit_ae_dev,
3252 	.reset_prepare = hclgevf_reset_prepare_general,
3253 	.reset_done = hclgevf_reset_done,
3254 	.init_client_instance = hclgevf_init_client_instance,
3255 	.uninit_client_instance = hclgevf_uninit_client_instance,
3256 	.start = hclgevf_ae_start,
3257 	.stop = hclgevf_ae_stop,
3258 	.client_start = hclgevf_client_start,
3259 	.client_stop = hclgevf_client_stop,
3260 	.map_ring_to_vector = hclgevf_map_ring_to_vector,
3261 	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
3262 	.get_vector = hclgevf_get_vector,
3263 	.put_vector = hclgevf_put_vector,
3264 	.reset_queue = hclgevf_reset_tqp,
3265 	.get_mac_addr = hclgevf_get_mac_addr,
3266 	.set_mac_addr = hclgevf_set_mac_addr,
3267 	.add_uc_addr = hclgevf_add_uc_addr,
3268 	.rm_uc_addr = hclgevf_rm_uc_addr,
3269 	.add_mc_addr = hclgevf_add_mc_addr,
3270 	.rm_mc_addr = hclgevf_rm_mc_addr,
3271 	.get_stats = hclgevf_get_stats,
3272 	.update_stats = hclgevf_update_stats,
3273 	.get_strings = hclgevf_get_strings,
3274 	.get_sset_count = hclgevf_get_sset_count,
3275 	.get_rss_key_size = hclge_comm_get_rss_key_size,
3276 	.get_rss = hclgevf_get_rss,
3277 	.set_rss = hclgevf_set_rss,
3278 	.get_rss_tuple = hclgevf_get_rss_tuple,
3279 	.set_rss_tuple = hclgevf_set_rss_tuple,
3280 	.get_tc_size = hclgevf_get_tc_size,
3281 	.get_fw_version = hclgevf_get_fw_version,
3282 	.set_vlan_filter = hclgevf_set_vlan_filter,
3283 	.enable_vlan_filter = hclgevf_enable_vlan_filter,
3284 	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
3285 	.reset_event = hclgevf_reset_event,
3286 	.set_default_reset_request = hclgevf_set_def_reset_request,
3287 	.set_channels = hclgevf_set_channels,
3288 	.get_channels = hclgevf_get_channels,
3289 	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
3290 	.get_regs_len = hclgevf_get_regs_len,
3291 	.get_regs = hclgevf_get_regs,
3292 	.get_status = hclgevf_get_status,
3293 	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
3294 	.get_media_type = hclgevf_get_media_type,
3295 	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
3296 	.ae_dev_resetting = hclgevf_ae_dev_resetting,
3297 	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
3298 	.set_gro_en = hclgevf_gro_en,
3299 	.set_mtu = hclgevf_set_mtu,
3300 	.get_global_queue_id = hclgevf_get_qid_global,
3301 	.set_timer_task = hclgevf_set_timer_task,
3302 	.get_link_mode = hclgevf_get_link_mode,
3303 	.set_promisc_mode = hclgevf_set_promisc_mode,
3304 	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
3305 	.get_cmdq_stat = hclgevf_get_cmdq_stat,
3306 };
3307 
3308 static struct hnae3_ae_algo ae_algovf = {
3309 	.ops = &hclgevf_ops,
3310 	.pdev_id_table = ae_algovf_pci_tbl,
3311 };
3312 
3313 static int __init hclgevf_init(void)
3314 {
3315 	pr_info("%s is initializing\n", HCLGEVF_NAME);
3316 
3317 	hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
3318 	if (!hclgevf_wq) {
3319 		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
3320 		return -ENOMEM;
3321 	}
3322 
3323 	hnae3_register_ae_algo(&ae_algovf);
3324 
3325 	return 0;
3326 }
3327 
3328 static void __exit hclgevf_exit(void)
3329 {
3330 	hnae3_unregister_ae_algo(&ae_algovf);
3331 	destroy_workqueue(hclgevf_wq);
3332 }
3333 module_init(hclgevf_init);
3334 module_exit(hclgevf_exit);
3335 
3336 MODULE_LICENSE("GPL");
3337 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3338 MODULE_DESCRIPTION("HCLGEVF Driver");
3339 MODULE_VERSION(HCLGEVF_MOD_VERSION);
3340