// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

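/* Per-queue packet counters live in hardware and are fetched one queue at a
 * time through the command queue: a query descriptor carries the queue index
 * in data[0], the firmware returns the packet count in data[1], and the
 * result is accumulated into the driver's software tqp_stats.
 */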
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = kinfo->tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat failed, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat failed, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* one TX and one RX packet counter per in-use tqp */
	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQP stats failed, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d\n",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

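/* The queue configuration is owned by the PF, so the VF fetches it over the
 * mailbox. The 8-byte response is expected to pack four u16 fields in order:
 * number of tqps, max RSS size, descriptor count and RX buffer length, which
 * is why the fields are copied out at offsets 0, 2, 4 and 6 below.
 */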
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d\n",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_enable_tso(struct hclgevf_dev *hdev, int enable)
{
	struct hclgevf_cfg_tso_status_cmd *req;
	struct hclgevf_desc desc;

	req = (struct hclgevf_cfg_tso_status_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_TSO_GENERIC_CONFIG,
				     false);
	hnae_set_bit(req->tso_enable, HCLGEVF_TSO_ENABLE_B, enable);

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

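/* Derive the NIC handle's queue layout from what the PF granted: rss_size is
 * the number of queues per TC (bounded by the RSS limit), and num_tqps is the
 * number of queues actually exposed, i.e. rss_size * num_tc capped at the
 * total queue count. hw_tc_map is assumed to have at least one TC bit set,
 * otherwise the division below would be undefined.
 */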
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF\n", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

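/* Vector allocation scans vector_status for free slots (marked
 * HCLGEVF_INVALID_VPORT), skipping vector 0 which is reserved for the misc
 * (mailbox) interrupt. Fewer vectors than requested may be handed out; the
 * caller must check the returned count.
 */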
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

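/* Program the per-TC RSS mode: each enabled TC gets a queue region of
 * rss_size queues starting at tc * rss_size, with the region size encoded
 * as ilog2(roundup_pow_of_two(rss_size)).
 */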
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			     (tc_valid[i] & 0x1));
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
			       HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
			       HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(%d) to set rss tc mode\n", status);

	return status;
}

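/* Read the RSS hash algorithm and (optionally) the hash key back from
 * hardware. The key spans three query commands: each returns
 * HCLGEVF_RSS_HASH_KEY_NUM bytes, except the last chunk which holds the
 * remainder of the HCLGEVF_RSS_KEY_SIZE byte key.
 */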
static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
				  u8 *key)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int lkup_times;
	int key_offset;
	int key_size;
	int status;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	/* the full key needs three lookups; the hash algorithm alone needs
	 * only one
	 */
	lkup_times = key ? 3 : (hash ? 1 : 0);

	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     true);
		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"failed to get hardware RSS cfg, status = %d\n",
				status);
			return status;
		}

		if (key_offset == 2)
			key_size = HCLGEVF_RSS_KEY_SIZE -
				   HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		if (key)
			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
			       req->hash_key,
			       key_size);
	}

	if (hash) {
		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
			*hash = ETH_RSS_HASH_TOP;
		else
			*hash = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	/* the hash key and algorithm are not configurable yet; key and
	 * hfunc are ignored and only the indirection table is updated
	 */

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

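/* Ring-to-vector (un)mapping is requested from the PF over the mailbox.
 * msg[0] holds the map/unmap opcode, msg[1] the VF-relative vector id and
 * msg[2] the ring count; each ring then contributes its type and tqp index.
 * Chains longer than one message can carry are split across several mailbox
 * sends, which is what the counter logic below implements.
 */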
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
#define HCLGEVF_RING_NODE_VARIABLE_NUM		3
#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM	3
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i, vector_id;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index failed. ret = %d\n", vector_id);
		return vector_id;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	type = en ?
		HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	req->msg[0] = type;
	req->msg[1] = vector_id; /* vector_id should be id in VF */

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		i++;
		/* msg[2] carries the number of rings in this message */
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
				hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
				node->tqp_index;
		if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		    HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
		    HCLGEVF_RING_NODE_VARIABLE_NUM) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP failed, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	if (i > 0) {
		req->msg[2] = i;

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP failed, status is %d.\n", status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index failed. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
	if (ret) {
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector failed. vector = %d, ret = %d\n",
			vector_id, ret);
		return ret;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode failed, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable failed, status = %d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hnae3_queue *queue;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = kinfo->tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg[2] = {0};

	msg[0] = en;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
				    msg, 1, false, NULL, 0);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      HCLGE_MBX_MAC_VLAN_UC_MODIFY,
				      msg_data, ETH_ALEN * 2,
				      false, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

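/* VLAN filtering is done by the PF on the VF's behalf. The 5-byte mailbox
 * payload is: byte 0 the add/kill flag, bytes 1-2 the VLAN id and bytes 3-4
 * the 802.1Q protocol as passed in (big-endian __be16), matching how the
 * message is assembled below.
 */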
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
			     NULL, 0);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. Once the PF can push such
	 * updates to the VF on its own, this polling can be removed.
	 */
	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

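/* Only the mailbox (CMDQ RX) event is handled on vector 0 for now: the cause
 * register is read, the recognized bit is cleared from the value handed back
 * for acknowledgment, and anything else is logged as an unknown source.
 */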
static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

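/* Misc (vector 0) interrupt handler: the vector is masked while the cause is
 * inspected, the mailbox task is scheduled if a mailbox event is pending, the
 * event source is acknowledged, and the vector is unmasked again.
 */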
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	/* schedule the VF mailbox service task, if not already scheduled */
	hclgevf_mbx_task_schedule(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector =
		hdev->vector_status[hdev->num_msi_used];

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other VLAN config (e.g. VLAN TX/RX offload) will also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring disable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	/* enable misc. vector (vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector (vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_instance(struct hclgevf_dev *hdev,
				 struct hnae3_client *client)
{
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;
		break;
	case HNAE3_CLIENT_ROCE:
		hdev->roce_client = client;
		hdev->roce.client = client;

		if (hnae3_dev_roce_supported(hdev)) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
		break;
	}

	return 0;
}

static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
				    struct hnae3_client *client)
{
	/* un-init roce, if it exists */
	if (hdev->roce_client)
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);

	/* un-init nic/unic, if this was not called by roce client */
	if ((client->ops->uninit_instance) &&
	    (client->type != HNAE3_CLIENT_ROCE))
		client->ops->uninit_instance(&hdev->nic, 0);
}

static int hclgevf_register_client(struct hnae3_client *client,
				   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	return hclgevf_init_instance(hdev, client);
}

static void hclgevf_unregister_client(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_instance(hdev, client);
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);
	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

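/* Device bring-up: PCI resources first, then MSI/MSI-X vectors, state and
 * the misc (mailbox) interrupt, then the command queue, after which the
 * configuration can be fetched from the PF and queues, TSO, MTA, RSS and
 * VLAN can be set up. The error labels unwind in the reverse order.
 */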
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_irq_init;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_enable_tso(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to enable tso\n", ret);
		goto err_config;
	}

	/* Initialize VF's MTA */
	hdev->accept_mta_mc = true;
	ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to set mta filter mode\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_irq_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_cmd_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_register_client,
	.uninit_client_instance = hclgevf_unregister_client,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.get_channels = hclgevf_get_channels,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.name = HCLGEVF_NAME,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	return hnae3_register_ae_algo(&ae_algovf);
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);