// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQP stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d\n",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d\n",
			status);
		return status;
	}

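	/* the PF replies with four u16 fields packed back to back:
	 * bytes 0-1 = num_tqps, bytes 2-3 = rss_size_max,
	 * bytes 4-5 = num_desc, bytes 6-7 = rx_buf_len
	 */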
	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF\n", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

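	/* vector 0 is reserved for the misc/mailbox interrupt, so the
	 * search for a free slot starts at HCLGEVF_MISC_VECTOR_NUM + 1
	 */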
	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

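	/* the hash key is larger than one descriptor's payload, so it is
	 * written in HCLGEVF_RSS_HASH_KEY_NUM-byte chunks across three
	 * commands; the last chunk carries whatever bytes remain
	 */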
	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS algo key failed, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

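	/* hardware expects the per-TC queue size as the log2 of a power of
	 * two, so round rss_size up and store its exponent
	 */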
	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

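	/* mailbox message layout for (un)map requests: msg[0] = opcode,
	 * msg[1] = vector id, msg[2] = number of rings in this message,
	 * followed by one {ring type, tqp index, gl index} triplet per
	 * ring; the message is flushed whenever it fills up or the chain
	 * ends
	 */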
	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
					HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret = %d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				    bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

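	/* mailbox payload: bytes 0-5 carry the new MAC address and bytes
	 * 6-11 the old one, presumably so the PF can locate the exact
	 * unicast entry to modify
	 */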
	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

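	/* 5-byte mailbox payload: byte 0 = is_kill flag, bytes 1-2 = VLAN
	 * id, bytes 3-4 = protocol (still in network byte order)
	 */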
	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before sending queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev,
			"flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* wait a bit longer for the stack reset to complete. This can happen
	 * when the reset was asserted by the PF, which also means we may end
	 * up waiting a bit more than necessary even for a pure VF reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return 0;
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	/* When VF reset failed, only the higher level reset asserted by PF
	 * can restore it, so re-initialize the command queue to receive
	 * this higher reset event.
	 */
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");

	return ret;
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has indicated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to a watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which made the watchdog react and induce a VF reset.
		 *       This also means our cmdq would be unreliable.
		 *    b. a problem in TX due to some other lower layer (e.g.
		 *       the link layer not functioning properly)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: there is no clear way to detect the above cases other
		 * than to react to the PF's response to this reset request.
		 * The PF will ack cases 1b and 2, but we will get no
		 * intimation about 1a from the PF as the cmdq would be in an
		 * unreliable state, i.e. mailbox communication between PF and
		 * VF would be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

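/* writing 1/0 to the misc vector address presumably unmasks/masks vector 0
 * at the hardware level; the IRQ handler below uses this to keep the vector
 * quiet while the event cause is processed and cleared
 */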
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
		netdev_rss_key_fill(rss_cfg->rss_hash_key,
				    HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other VLAN config (e.g. VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

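	/* when RoCE shares this function its vectors live above
	 * roce_base_msix_offset and MSI-X is mandatory; otherwise plain
	 * MSI is an acceptable fallback
	 */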
1813 	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
1814 		vectors = pci_alloc_irq_vectors(pdev,
1815 						hdev->roce_base_msix_offset + 1,
1816 						hdev->num_msi,
1817 						PCI_IRQ_MSIX);
1818 	else
1819 		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1820 						PCI_IRQ_MSI | PCI_IRQ_MSIX);
1821 
1822 	if (vectors < 0) {
1823 		dev_err(&pdev->dev,
1824 			"failed(%d) to allocate MSI/MSI-X vectors\n",
1825 			vectors);
1826 		return vectors;
1827 	}
1828 	if (vectors < hdev->num_msi)
1829 		dev_warn(&hdev->pdev->dev,
1830 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1831 			 hdev->num_msi, vectors);
1832 
1833 	hdev->num_msi = vectors;
1834 	hdev->num_msi_left = vectors;
1835 	hdev->base_msi_vector = pdev->irq;
1836 	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
1837 
1838 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1839 					   sizeof(u16), GFP_KERNEL);
1840 	if (!hdev->vector_status) {
1841 		pci_free_irq_vectors(pdev);
1842 		return -ENOMEM;
1843 	}
1844 
1845 	for (i = 0; i < hdev->num_msi; i++)
1846 		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
1847 
1848 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1849 					sizeof(int), GFP_KERNEL);
1850 	if (!hdev->vector_irq) {
1851 		devm_kfree(&pdev->dev, hdev->vector_status);
1852 		pci_free_irq_vectors(pdev);
1853 		return -ENOMEM;
1854 	}
1855 
1856 	return 0;
1857 }
1858 
static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

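/* hclgevf_misc_irq_init - request the misc interrupt (vector 0), clear
 * any stale event cause and enable the vector, so that asynchronous
 * events such as mailbox messages from the PF can be serviced.
 */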
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable the misc vector (vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return 0;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable the misc vector (vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

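/* hclgevf_init_client_instance - attach a KNIC/UNIC/RoCE client to this
 * AE device. The RoCE client is only initialized once both the NIC and
 * RoCE clients are registered and the hardware actually supports RoCE.
 */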
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* uninitialize the RoCE client, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* uninitialize the NIC/UNIC client, unless this uninit was
	 * requested by the RoCE client
	 */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

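/* hclgevf_pci_init - enable the PCI device, set a 64-bit DMA mask, claim
 * the regions and map BAR 2, which holds the VF's configuration
 * register space.
 */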
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA mask, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

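/* hclgevf_query_vf_resource - ask the firmware how many interrupt
 * vectors this VF owns. For a RoCE-capable VF the RoCE MSI-X offset and
 * vector count are read as well, and num_msi covers both ranges.
 */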
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* A RoCE-capable VF owns both NIC and RoCE vectors; the
		 * NIC vectors precede the RoCE vectors, with the RoCE
		 * base offset fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

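/* hclgevf_pci_reset - re-establish interrupt state across a reset: a
 * full VF reset first tears down the misc IRQ and the MSI/MSI-X
 * vectors; both are then set up again if not currently initialized.
 */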
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

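/* hclgevf_reset_hdev - lightweight re-initialization after a reset:
 * restore interrupts, the command queue, RSS, GRO and the VLAN filter
 * without re-allocating resources the way hclgevf_init_hdev() does.
 */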
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd init failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

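/* hclgevf_init_hdev - one-time bring-up of the VF device: PCI and
 * command queue setup, resource query, interrupt and state init, then
 * TQP allocation, GRO, RSS and VLAN configuration. The error labels
 * unwind in the reverse order of initialization.
 */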
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* get VF resource information, e.g. interrupt vector counts */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query VF resource, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		hclgevf_pci_uninit(hdev);
	}

	hclgevf_cmd_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

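/* the maximum channel count is bounded both by the RSS size per TC
 * summed over all TCs and by the number of TQPs allocated to the VF
 */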
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support many more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->reset_count;
}

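/* hnae3 AE operations implemented by this VF driver; the hnae3
 * framework dispatches netdev and ethtool requests through this table.
 */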
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);