1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <net/rtnetlink.h>
16 #include "hclge_cmd.h"
17 #include "hclge_dcb.h"
18 #include "hclge_main.h"
19 #include "hclge_mbx.h"
20 #include "hclge_mdio.h"
21 #include "hclge_tm.h"
22 #include "hclge_err.h"
23 #include "hnae3.h"
24 
25 #define HCLGE_NAME			"hclge"
26 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
27 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
28 
29 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
30 static int hclge_init_vlan_config(struct hclge_dev *hdev);
31 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
32 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
33 			       u16 *allocated_size, bool is_alloc);
34 
35 static struct hnae3_ae_algo ae_algo;
36 
37 static const struct pci_device_id ae_algo_pci_tbl[] = {
38 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
39 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
40 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
41 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
42 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
45 	/* required last entry */
46 	{0, }
47 };
48 
49 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
50 
51 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
52 	"App    Loopback test",
53 	"Serdes serial Loopback test",
54 	"Serdes parallel Loopback test",
55 	"Phy    Loopback test"
56 };
57 
58 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
59 	{"mac_tx_mac_pause_num",
60 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
61 	{"mac_rx_mac_pause_num",
62 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
63 	{"mac_tx_pfc_pri0_pkt_num",
64 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
65 	{"mac_tx_pfc_pri1_pkt_num",
66 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
67 	{"mac_tx_pfc_pri2_pkt_num",
68 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
69 	{"mac_tx_pfc_pri3_pkt_num",
70 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
71 	{"mac_tx_pfc_pri4_pkt_num",
72 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
73 	{"mac_tx_pfc_pri5_pkt_num",
74 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
75 	{"mac_tx_pfc_pri6_pkt_num",
76 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
77 	{"mac_tx_pfc_pri7_pkt_num",
78 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
79 	{"mac_rx_pfc_pri0_pkt_num",
80 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
81 	{"mac_rx_pfc_pri1_pkt_num",
82 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
83 	{"mac_rx_pfc_pri2_pkt_num",
84 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
85 	{"mac_rx_pfc_pri3_pkt_num",
86 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
87 	{"mac_rx_pfc_pri4_pkt_num",
88 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
89 	{"mac_rx_pfc_pri5_pkt_num",
90 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
91 	{"mac_rx_pfc_pri6_pkt_num",
92 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
93 	{"mac_rx_pfc_pri7_pkt_num",
94 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
95 	{"mac_tx_total_pkt_num",
96 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
97 	{"mac_tx_total_oct_num",
98 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
99 	{"mac_tx_good_pkt_num",
100 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
101 	{"mac_tx_bad_pkt_num",
102 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
103 	{"mac_tx_good_oct_num",
104 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
105 	{"mac_tx_bad_oct_num",
106 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
107 	{"mac_tx_uni_pkt_num",
108 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
109 	{"mac_tx_multi_pkt_num",
110 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
111 	{"mac_tx_broad_pkt_num",
112 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
113 	{"mac_tx_undersize_pkt_num",
114 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
115 	{"mac_tx_oversize_pkt_num",
116 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
117 	{"mac_tx_64_oct_pkt_num",
118 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
119 	{"mac_tx_65_127_oct_pkt_num",
120 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
121 	{"mac_tx_128_255_oct_pkt_num",
122 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
123 	{"mac_tx_256_511_oct_pkt_num",
124 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
125 	{"mac_tx_512_1023_oct_pkt_num",
126 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
127 	{"mac_tx_1024_1518_oct_pkt_num",
128 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
129 	{"mac_tx_1519_2047_oct_pkt_num",
130 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
131 	{"mac_tx_2048_4095_oct_pkt_num",
132 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
133 	{"mac_tx_4096_8191_oct_pkt_num",
134 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
135 	{"mac_tx_8192_9216_oct_pkt_num",
136 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
137 	{"mac_tx_9217_12287_oct_pkt_num",
138 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
139 	{"mac_tx_12288_16383_oct_pkt_num",
140 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
141 	{"mac_tx_1519_max_good_pkt_num",
142 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
143 	{"mac_tx_1519_max_bad_pkt_num",
144 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
145 	{"mac_rx_total_pkt_num",
146 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
147 	{"mac_rx_total_oct_num",
148 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
149 	{"mac_rx_good_pkt_num",
150 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
151 	{"mac_rx_bad_pkt_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
153 	{"mac_rx_good_oct_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
155 	{"mac_rx_bad_oct_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
157 	{"mac_rx_uni_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
159 	{"mac_rx_multi_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
161 	{"mac_rx_broad_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
163 	{"mac_rx_undersize_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
165 	{"mac_rx_oversize_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
167 	{"mac_rx_64_oct_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
169 	{"mac_rx_65_127_oct_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
171 	{"mac_rx_128_255_oct_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
173 	{"mac_rx_256_511_oct_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
175 	{"mac_rx_512_1023_oct_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
177 	{"mac_rx_1024_1518_oct_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
179 	{"mac_rx_1519_2047_oct_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
181 	{"mac_rx_2048_4095_oct_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
183 	{"mac_rx_4096_8191_oct_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
185 	{"mac_rx_8192_9216_oct_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
187 	{"mac_rx_9217_12287_oct_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
189 	{"mac_rx_12288_16383_oct_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
191 	{"mac_rx_1519_max_good_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
193 	{"mac_rx_1519_max_bad_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
195 
196 	{"mac_tx_fragment_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
198 	{"mac_tx_undermin_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
200 	{"mac_tx_jabber_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
202 	{"mac_tx_err_all_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
204 	{"mac_tx_from_app_good_pkt_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
206 	{"mac_tx_from_app_bad_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
208 	{"mac_rx_fragment_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
210 	{"mac_rx_undermin_pkt_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
212 	{"mac_rx_jabber_pkt_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
214 	{"mac_rx_fcs_err_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
216 	{"mac_rx_send_app_good_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
218 	{"mac_rx_send_app_bad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
220 };
221 
222 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
223 	{
224 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
225 		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
226 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
227 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
228 		.i_port_bitmap = 0x1,
229 	},
230 };
231 
232 static int hclge_mac_update_stats(struct hclge_dev *hdev)
233 {
234 #define HCLGE_MAC_CMD_NUM 21
235 #define HCLGE_RTN_DATA_NUM 4
236 
237 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
238 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
239 	__le64 *desc_data;
240 	int i, k, n;
241 	int ret;
242 
243 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
244 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
245 	if (ret) {
246 		dev_err(&hdev->pdev->dev,
247 			"Get MAC pkt stats fail, status = %d.\n", ret);
248 
249 		return ret;
250 	}
251 
252 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
253 		if (unlikely(i == 0)) {
254 			desc_data = (__le64 *)(&desc[i].data[0]);
255 			n = HCLGE_RTN_DATA_NUM - 2;
256 		} else {
257 			desc_data = (__le64 *)(&desc[i]);
258 			n = HCLGE_RTN_DATA_NUM;
259 		}
260 		for (k = 0; k < n; k++) {
261 			*data++ += le64_to_cpu(*desc_data);
262 			desc_data++;
263 		}
264 	}
265 
266 	return 0;
267 }
268 
269 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
270 {
271 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
272 	struct hclge_vport *vport = hclge_get_vport(handle);
273 	struct hclge_dev *hdev = vport->back;
274 	struct hnae3_queue *queue;
275 	struct hclge_desc desc[1];
276 	struct hclge_tqp *tqp;
277 	int ret, i;
278 
279 	for (i = 0; i < kinfo->num_tqps; i++) {
280 		queue = handle->kinfo.tqp[i];
281 		tqp = container_of(queue, struct hclge_tqp, q);
282 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
283 		hclge_cmd_setup_basic_desc(&desc[0],
284 					   HCLGE_OPC_QUERY_RX_STATUS,
285 					   true);
286 
287 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
288 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
289 		if (ret) {
290 			dev_err(&hdev->pdev->dev,
291 				"Query tqp stat fail, status = %d,queue = %d\n",
292 				ret,	i);
293 			return ret;
294 		}
295 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
296 			le32_to_cpu(desc[0].data[1]);
297 	}
298 
299 	for (i = 0; i < kinfo->num_tqps; i++) {
300 		queue = handle->kinfo.tqp[i];
301 		tqp = container_of(queue, struct hclge_tqp, q);
302 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
303 		hclge_cmd_setup_basic_desc(&desc[0],
304 					   HCLGE_OPC_QUERY_TX_STATUS,
305 					   true);
306 
307 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
308 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
309 		if (ret) {
310 			dev_err(&hdev->pdev->dev,
311 				"Query tqp stat fail, status = %d,queue = %d\n",
312 				ret, i);
313 			return ret;
314 		}
315 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
316 			le32_to_cpu(desc[0].data[1]);
317 	}
318 
319 	return 0;
320 }
321 
322 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
323 {
324 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
325 	struct hclge_tqp *tqp;
326 	u64 *buff = data;
327 	int i;
328 
329 	for (i = 0; i < kinfo->num_tqps; i++) {
330 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
331 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
332 	}
333 
334 	for (i = 0; i < kinfo->num_tqps; i++) {
335 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
336 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
337 	}
338 
339 	return buff;
340 }
341 
342 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
343 {
344 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
345 
346 	return kinfo->num_tqps * (2);
347 }
348 
349 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
350 {
351 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
352 	u8 *buff = data;
353 	int i = 0;
354 
355 	for (i = 0; i < kinfo->num_tqps; i++) {
356 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
357 			struct hclge_tqp, q);
358 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
359 			 tqp->index);
360 		buff = buff + ETH_GSTRING_LEN;
361 	}
362 
363 	for (i = 0; i < kinfo->num_tqps; i++) {
364 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
365 			struct hclge_tqp, q);
366 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
367 			 tqp->index);
368 		buff = buff + ETH_GSTRING_LEN;
369 	}
370 
371 	return buff;
372 }
373 
374 static u64 *hclge_comm_get_stats(void *comm_stats,
375 				 const struct hclge_comm_stats_str strs[],
376 				 int size, u64 *data)
377 {
378 	u64 *buf = data;
379 	u32 i;
380 
381 	for (i = 0; i < size; i++)
382 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
383 
384 	return buf + size;
385 }
386 
387 static u8 *hclge_comm_get_strings(u32 stringset,
388 				  const struct hclge_comm_stats_str strs[],
389 				  int size, u8 *data)
390 {
391 	char *buff = (char *)data;
392 	u32 i;
393 
394 	if (stringset != ETH_SS_STATS)
395 		return buff;
396 
397 	for (i = 0; i < size; i++) {
398 		snprintf(buff, ETH_GSTRING_LEN,
399 			 strs[i].desc);
400 		buff = buff + ETH_GSTRING_LEN;
401 	}
402 
403 	return (u8 *)buff;
404 }
405 
406 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
407 				 struct net_device_stats *net_stats)
408 {
409 	net_stats->tx_dropped = 0;
410 	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
411 	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
412 	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
413 
414 	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
415 	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
416 
417 	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
418 	net_stats->rx_length_errors =
419 		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
420 	net_stats->rx_length_errors +=
421 		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
422 	net_stats->rx_over_errors =
423 		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
424 }
425 
426 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
427 {
428 	struct hnae3_handle *handle;
429 	int status;
430 
431 	handle = &hdev->vport[0].nic;
432 	if (handle->client) {
433 		status = hclge_tqps_update_stats(handle);
434 		if (status) {
435 			dev_err(&hdev->pdev->dev,
436 				"Update TQPS stats fail, status = %d.\n",
437 				status);
438 		}
439 	}
440 
441 	status = hclge_mac_update_stats(hdev);
442 	if (status)
443 		dev_err(&hdev->pdev->dev,
444 			"Update MAC stats fail, status = %d.\n", status);
445 
446 	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
447 }
448 
449 static void hclge_update_stats(struct hnae3_handle *handle,
450 			       struct net_device_stats *net_stats)
451 {
452 	struct hclge_vport *vport = hclge_get_vport(handle);
453 	struct hclge_dev *hdev = vport->back;
454 	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
455 	int status;
456 
457 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
458 		return;
459 
460 	status = hclge_mac_update_stats(hdev);
461 	if (status)
462 		dev_err(&hdev->pdev->dev,
463 			"Update MAC stats fail, status = %d.\n",
464 			status);
465 
466 	status = hclge_tqps_update_stats(handle);
467 	if (status)
468 		dev_err(&hdev->pdev->dev,
469 			"Update TQPS stats fail, status = %d.\n",
470 			status);
471 
472 	hclge_update_netstat(hw_stats, net_stats);
473 
474 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
475 }
476 
477 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
478 {
479 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
480 		HNAE3_SUPPORT_PHY_LOOPBACK |\
481 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
482 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
483 
484 	struct hclge_vport *vport = hclge_get_vport(handle);
485 	struct hclge_dev *hdev = vport->back;
486 	int count = 0;
487 
488 	/* Loopback test support rules:
489 	 * mac: only GE mode support
490 	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
491 	 * phy: only support when phy device exist on board
492 	 */
493 	if (stringset == ETH_SS_TEST) {
494 		/* clear loopback bit flags at first */
495 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
496 		if (hdev->pdev->revision >= 0x21 ||
497 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
498 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
499 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
500 			count += 1;
501 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
502 		}
503 
504 		count += 2;
505 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
506 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
507 	} else if (stringset == ETH_SS_STATS) {
508 		count = ARRAY_SIZE(g_mac_stats_string) +
509 			hclge_tqps_get_sset_count(handle, stringset);
510 	}
511 
512 	return count;
513 }
514 
515 static void hclge_get_strings(struct hnae3_handle *handle,
516 			      u32 stringset,
517 			      u8 *data)
518 {
519 	u8 *p = (char *)data;
520 	int size;
521 
522 	if (stringset == ETH_SS_STATS) {
523 		size = ARRAY_SIZE(g_mac_stats_string);
524 		p = hclge_comm_get_strings(stringset,
525 					   g_mac_stats_string,
526 					   size,
527 					   p);
528 		p = hclge_tqps_get_strings(handle, p);
529 	} else if (stringset == ETH_SS_TEST) {
530 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
531 			memcpy(p,
532 			       hns3_nic_test_strs[HNAE3_LOOP_APP],
533 			       ETH_GSTRING_LEN);
534 			p += ETH_GSTRING_LEN;
535 		}
536 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
537 			memcpy(p,
538 			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
539 			       ETH_GSTRING_LEN);
540 			p += ETH_GSTRING_LEN;
541 		}
542 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
543 			memcpy(p,
544 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
545 			       ETH_GSTRING_LEN);
546 			p += ETH_GSTRING_LEN;
547 		}
548 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
549 			memcpy(p,
550 			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
551 			       ETH_GSTRING_LEN);
552 			p += ETH_GSTRING_LEN;
553 		}
554 	}
555 }
556 
557 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
558 {
559 	struct hclge_vport *vport = hclge_get_vport(handle);
560 	struct hclge_dev *hdev = vport->back;
561 	u64 *p;
562 
563 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
564 				 g_mac_stats_string,
565 				 ARRAY_SIZE(g_mac_stats_string),
566 				 data);
567 	p = hclge_tqps_get_stats(handle, p);
568 }
569 
570 static int hclge_parse_func_status(struct hclge_dev *hdev,
571 				   struct hclge_func_status_cmd *status)
572 {
573 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
574 		return -EINVAL;
575 
576 	/* Set the pf to main pf */
577 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
578 		hdev->flag |= HCLGE_FLAG_MAIN;
579 	else
580 		hdev->flag &= ~HCLGE_FLAG_MAIN;
581 
582 	return 0;
583 }
584 
585 static int hclge_query_function_status(struct hclge_dev *hdev)
586 {
587 	struct hclge_func_status_cmd *req;
588 	struct hclge_desc desc;
589 	int timeout = 0;
590 	int ret;
591 
592 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
593 	req = (struct hclge_func_status_cmd *)desc.data;
594 
595 	do {
596 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
597 		if (ret) {
598 			dev_err(&hdev->pdev->dev,
599 				"query function status failed %d.\n",
600 				ret);
601 
602 			return ret;
603 		}
604 
605 		/* Check pf reset is done */
606 		if (req->pf_state)
607 			break;
608 		usleep_range(1000, 2000);
609 	} while (timeout++ < 5);
610 
611 	ret = hclge_parse_func_status(hdev, req);
612 
613 	return ret;
614 }
615 
616 static int hclge_query_pf_resource(struct hclge_dev *hdev)
617 {
618 	struct hclge_pf_res_cmd *req;
619 	struct hclge_desc desc;
620 	int ret;
621 
622 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
623 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
624 	if (ret) {
625 		dev_err(&hdev->pdev->dev,
626 			"query pf resource failed %d.\n", ret);
627 		return ret;
628 	}
629 
630 	req = (struct hclge_pf_res_cmd *)desc.data;
631 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
632 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
633 
634 	if (hnae3_dev_roce_supported(hdev)) {
635 		hdev->roce_base_msix_offset =
636 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
637 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
638 		hdev->num_roce_msi =
639 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
640 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
641 
642 		/* PF should have NIC vectors and Roce vectors,
643 		 * NIC vectors are queued before Roce vectors.
644 		 */
645 		hdev->num_msi = hdev->num_roce_msi  +
646 				hdev->roce_base_msix_offset;
647 	} else {
648 		hdev->num_msi =
649 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
650 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
651 	}
652 
653 	return 0;
654 }
655 
656 static int hclge_parse_speed(int speed_cmd, int *speed)
657 {
658 	switch (speed_cmd) {
659 	case 6:
660 		*speed = HCLGE_MAC_SPEED_10M;
661 		break;
662 	case 7:
663 		*speed = HCLGE_MAC_SPEED_100M;
664 		break;
665 	case 0:
666 		*speed = HCLGE_MAC_SPEED_1G;
667 		break;
668 	case 1:
669 		*speed = HCLGE_MAC_SPEED_10G;
670 		break;
671 	case 2:
672 		*speed = HCLGE_MAC_SPEED_25G;
673 		break;
674 	case 3:
675 		*speed = HCLGE_MAC_SPEED_40G;
676 		break;
677 	case 4:
678 		*speed = HCLGE_MAC_SPEED_50G;
679 		break;
680 	case 5:
681 		*speed = HCLGE_MAC_SPEED_100G;
682 		break;
683 	default:
684 		return -EINVAL;
685 	}
686 
687 	return 0;
688 }
689 
690 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
691 					u8 speed_ability)
692 {
693 	unsigned long *supported = hdev->hw.mac.supported;
694 
695 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
696 		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
697 			supported);
698 
699 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
700 		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
701 			supported);
702 
703 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
704 		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
705 			supported);
706 
707 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
708 		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
709 			supported);
710 
711 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
712 		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
713 			supported);
714 
715 	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
716 	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
717 }
718 
719 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
720 {
721 	u8 media_type = hdev->hw.mac.media_type;
722 
723 	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
724 		return;
725 
726 	hclge_parse_fiber_link_mode(hdev, speed_ability);
727 }
728 
729 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
730 {
731 	struct hclge_cfg_param_cmd *req;
732 	u64 mac_addr_tmp_high;
733 	u64 mac_addr_tmp;
734 	int i;
735 
736 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
737 
738 	/* get the configuration */
739 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
740 					      HCLGE_CFG_VMDQ_M,
741 					      HCLGE_CFG_VMDQ_S);
742 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
743 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
744 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
745 					    HCLGE_CFG_TQP_DESC_N_M,
746 					    HCLGE_CFG_TQP_DESC_N_S);
747 
748 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
749 					HCLGE_CFG_PHY_ADDR_M,
750 					HCLGE_CFG_PHY_ADDR_S);
751 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
752 					  HCLGE_CFG_MEDIA_TP_M,
753 					  HCLGE_CFG_MEDIA_TP_S);
754 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
755 					  HCLGE_CFG_RX_BUF_LEN_M,
756 					  HCLGE_CFG_RX_BUF_LEN_S);
757 	/* get mac_address */
758 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
759 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
760 					    HCLGE_CFG_MAC_ADDR_H_M,
761 					    HCLGE_CFG_MAC_ADDR_H_S);
762 
763 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
764 
765 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
766 					     HCLGE_CFG_DEFAULT_SPEED_M,
767 					     HCLGE_CFG_DEFAULT_SPEED_S);
768 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
769 					    HCLGE_CFG_RSS_SIZE_M,
770 					    HCLGE_CFG_RSS_SIZE_S);
771 
772 	for (i = 0; i < ETH_ALEN; i++)
773 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
774 
775 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
776 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
777 
778 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
779 					     HCLGE_CFG_SPEED_ABILITY_M,
780 					     HCLGE_CFG_SPEED_ABILITY_S);
781 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
782 					 HCLGE_CFG_UMV_TBL_SPACE_M,
783 					 HCLGE_CFG_UMV_TBL_SPACE_S);
784 	if (!cfg->umv_space)
785 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
786 }
787 
788 /* hclge_get_cfg: query the static parameter from flash
789  * @hdev: pointer to struct hclge_dev
790  * @hcfg: the config structure to be getted
791  */
792 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
793 {
794 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
795 	struct hclge_cfg_param_cmd *req;
796 	int i, ret;
797 
798 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
799 		u32 offset = 0;
800 
801 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
802 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
803 					   true);
804 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
805 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
806 		/* Len should be united by 4 bytes when send to hardware */
807 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
808 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
809 		req->offset = cpu_to_le32(offset);
810 	}
811 
812 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
813 	if (ret) {
814 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
815 		return ret;
816 	}
817 
818 	hclge_parse_cfg(hcfg, desc);
819 
820 	return 0;
821 }
822 
823 static int hclge_get_cap(struct hclge_dev *hdev)
824 {
825 	int ret;
826 
827 	ret = hclge_query_function_status(hdev);
828 	if (ret) {
829 		dev_err(&hdev->pdev->dev,
830 			"query function status error %d.\n", ret);
831 		return ret;
832 	}
833 
834 	/* get pf resource */
835 	ret = hclge_query_pf_resource(hdev);
836 	if (ret)
837 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
838 
839 	return ret;
840 }
841 
842 static int hclge_configure(struct hclge_dev *hdev)
843 {
844 	struct hclge_cfg cfg;
845 	int ret, i;
846 
847 	ret = hclge_get_cfg(hdev, &cfg);
848 	if (ret) {
849 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
850 		return ret;
851 	}
852 
853 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
854 	hdev->base_tqp_pid = 0;
855 	hdev->rss_size_max = cfg.rss_size_max;
856 	hdev->rx_buf_len = cfg.rx_buf_len;
857 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
858 	hdev->hw.mac.media_type = cfg.media_type;
859 	hdev->hw.mac.phy_addr = cfg.phy_addr;
860 	hdev->num_desc = cfg.tqp_desc_num;
861 	hdev->tm_info.num_pg = 1;
862 	hdev->tc_max = cfg.tc_num;
863 	hdev->tm_info.hw_pfc_map = 0;
864 	hdev->wanted_umv_size = cfg.umv_space;
865 
866 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
867 	if (ret) {
868 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
869 		return ret;
870 	}
871 
872 	hclge_parse_link_mode(hdev, cfg.speed_ability);
873 
874 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
875 	    (hdev->tc_max < 1)) {
876 		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
877 			 hdev->tc_max);
878 		hdev->tc_max = 1;
879 	}
880 
881 	/* Dev does not support DCB */
882 	if (!hnae3_dev_dcb_supported(hdev)) {
883 		hdev->tc_max = 1;
884 		hdev->pfc_max = 0;
885 	} else {
886 		hdev->pfc_max = hdev->tc_max;
887 	}
888 
889 	hdev->tm_info.num_tc = hdev->tc_max;
890 
891 	/* Currently not support uncontiuous tc */
892 	for (i = 0; i < hdev->tm_info.num_tc; i++)
893 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
894 
895 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
896 
897 	return ret;
898 }
899 
900 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
901 			    int tso_mss_max)
902 {
903 	struct hclge_cfg_tso_status_cmd *req;
904 	struct hclge_desc desc;
905 	u16 tso_mss;
906 
907 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
908 
909 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
910 
911 	tso_mss = 0;
912 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
913 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
914 	req->tso_mss_min = cpu_to_le16(tso_mss);
915 
916 	tso_mss = 0;
917 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
918 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
919 	req->tso_mss_max = cpu_to_le16(tso_mss);
920 
921 	return hclge_cmd_send(&hdev->hw, &desc, 1);
922 }
923 
924 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
925 {
926 	struct hclge_cfg_gro_status_cmd *req;
927 	struct hclge_desc desc;
928 	int ret;
929 
930 	if (!hnae3_dev_gro_supported(hdev))
931 		return 0;
932 
933 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
934 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
935 
936 	req->gro_en = cpu_to_le16(en ? 1 : 0);
937 
938 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
939 	if (ret)
940 		dev_err(&hdev->pdev->dev,
941 			"GRO hardware config cmd failed, ret = %d\n", ret);
942 
943 	return ret;
944 }
945 
946 static int hclge_alloc_tqps(struct hclge_dev *hdev)
947 {
948 	struct hclge_tqp *tqp;
949 	int i;
950 
951 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
952 				  sizeof(struct hclge_tqp), GFP_KERNEL);
953 	if (!hdev->htqp)
954 		return -ENOMEM;
955 
956 	tqp = hdev->htqp;
957 
958 	for (i = 0; i < hdev->num_tqps; i++) {
959 		tqp->dev = &hdev->pdev->dev;
960 		tqp->index = i;
961 
962 		tqp->q.ae_algo = &ae_algo;
963 		tqp->q.buf_size = hdev->rx_buf_len;
964 		tqp->q.desc_num = hdev->num_desc;
965 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
966 			i * HCLGE_TQP_REG_SIZE;
967 
968 		tqp++;
969 	}
970 
971 	return 0;
972 }
973 
974 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
975 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
976 {
977 	struct hclge_tqp_map_cmd *req;
978 	struct hclge_desc desc;
979 	int ret;
980 
981 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
982 
983 	req = (struct hclge_tqp_map_cmd *)desc.data;
984 	req->tqp_id = cpu_to_le16(tqp_pid);
985 	req->tqp_vf = func_id;
986 	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
987 			1 << HCLGE_TQP_MAP_EN_B;
988 	req->tqp_vid = cpu_to_le16(tqp_vid);
989 
990 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
991 	if (ret)
992 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
993 
994 	return ret;
995 }
996 
997 static int  hclge_assign_tqp(struct hclge_vport *vport)
998 {
999 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1000 	struct hclge_dev *hdev = vport->back;
1001 	int i, alloced;
1002 
1003 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1004 	     alloced < kinfo->num_tqps; i++) {
1005 		if (!hdev->htqp[i].alloced) {
1006 			hdev->htqp[i].q.handle = &vport->nic;
1007 			hdev->htqp[i].q.tqp_index = alloced;
1008 			hdev->htqp[i].q.desc_num = kinfo->num_desc;
1009 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1010 			hdev->htqp[i].alloced = true;
1011 			alloced++;
1012 		}
1013 	}
1014 	vport->alloc_tqps = kinfo->num_tqps;
1015 
1016 	return 0;
1017 }
1018 
1019 static int hclge_knic_setup(struct hclge_vport *vport,
1020 			    u16 num_tqps, u16 num_desc)
1021 {
1022 	struct hnae3_handle *nic = &vport->nic;
1023 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1024 	struct hclge_dev *hdev = vport->back;
1025 	int i, ret;
1026 
1027 	kinfo->num_desc = num_desc;
1028 	kinfo->rx_buf_len = hdev->rx_buf_len;
1029 	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1030 	kinfo->rss_size
1031 		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1032 	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
1033 
1034 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1035 		if (hdev->hw_tc_map & BIT(i)) {
1036 			kinfo->tc_info[i].enable = true;
1037 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1038 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1039 			kinfo->tc_info[i].tc = i;
1040 		} else {
1041 			/* Set to default queue if TC is disable */
1042 			kinfo->tc_info[i].enable = false;
1043 			kinfo->tc_info[i].tqp_offset = 0;
1044 			kinfo->tc_info[i].tqp_count = 1;
1045 			kinfo->tc_info[i].tc = 0;
1046 		}
1047 	}
1048 
1049 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1050 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1051 	if (!kinfo->tqp)
1052 		return -ENOMEM;
1053 
1054 	ret = hclge_assign_tqp(vport);
1055 	if (ret)
1056 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1057 
1058 	return ret;
1059 }
1060 
1061 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1062 				  struct hclge_vport *vport)
1063 {
1064 	struct hnae3_handle *nic = &vport->nic;
1065 	struct hnae3_knic_private_info *kinfo;
1066 	u16 i;
1067 
1068 	kinfo = &nic->kinfo;
1069 	for (i = 0; i < kinfo->num_tqps; i++) {
1070 		struct hclge_tqp *q =
1071 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1072 		bool is_pf;
1073 		int ret;
1074 
1075 		is_pf = !(vport->vport_id);
1076 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1077 					     i, is_pf);
1078 		if (ret)
1079 			return ret;
1080 	}
1081 
1082 	return 0;
1083 }
1084 
1085 static int hclge_map_tqp(struct hclge_dev *hdev)
1086 {
1087 	struct hclge_vport *vport = hdev->vport;
1088 	u16 i, num_vport;
1089 
1090 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1091 	for (i = 0; i < num_vport; i++)	{
1092 		int ret;
1093 
1094 		ret = hclge_map_tqp_to_vport(hdev, vport);
1095 		if (ret)
1096 			return ret;
1097 
1098 		vport++;
1099 	}
1100 
1101 	return 0;
1102 }
1103 
1104 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1105 {
1106 	/* this would be initialized later */
1107 }
1108 
1109 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1110 {
1111 	struct hnae3_handle *nic = &vport->nic;
1112 	struct hclge_dev *hdev = vport->back;
1113 	int ret;
1114 
1115 	nic->pdev = hdev->pdev;
1116 	nic->ae_algo = &ae_algo;
1117 	nic->numa_node_mask = hdev->numa_node_mask;
1118 
1119 	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1120 		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
1121 		if (ret) {
1122 			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1123 				ret);
1124 			return ret;
1125 		}
1126 	} else {
1127 		hclge_unic_setup(vport, num_tqps);
1128 	}
1129 
1130 	return 0;
1131 }
1132 
1133 static int hclge_alloc_vport(struct hclge_dev *hdev)
1134 {
1135 	struct pci_dev *pdev = hdev->pdev;
1136 	struct hclge_vport *vport;
1137 	u32 tqp_main_vport;
1138 	u32 tqp_per_vport;
1139 	int num_vport, i;
1140 	int ret;
1141 
1142 	/* We need to alloc a vport for main NIC of PF */
1143 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1144 
1145 	if (hdev->num_tqps < num_vport) {
1146 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1147 			hdev->num_tqps, num_vport);
1148 		return -EINVAL;
1149 	}
1150 
1151 	/* Alloc the same number of TQPs for every vport */
1152 	tqp_per_vport = hdev->num_tqps / num_vport;
1153 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1154 
1155 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1156 			     GFP_KERNEL);
1157 	if (!vport)
1158 		return -ENOMEM;
1159 
1160 	hdev->vport = vport;
1161 	hdev->num_alloc_vport = num_vport;
1162 
1163 	if (IS_ENABLED(CONFIG_PCI_IOV))
1164 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1165 
1166 	for (i = 0; i < num_vport; i++) {
1167 		vport->back = hdev;
1168 		vport->vport_id = i;
1169 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1170 
1171 		if (i == 0)
1172 			ret = hclge_vport_setup(vport, tqp_main_vport);
1173 		else
1174 			ret = hclge_vport_setup(vport, tqp_per_vport);
1175 		if (ret) {
1176 			dev_err(&pdev->dev,
1177 				"vport setup failed for vport %d, %d\n",
1178 				i, ret);
1179 			return ret;
1180 		}
1181 
1182 		vport++;
1183 	}
1184 
1185 	return 0;
1186 }
1187 
1188 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1189 				    struct hclge_pkt_buf_alloc *buf_alloc)
1190 {
1191 /* TX buffer size is unit by 128 byte */
1192 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1193 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1194 	struct hclge_tx_buff_alloc_cmd *req;
1195 	struct hclge_desc desc;
1196 	int ret;
1197 	u8 i;
1198 
1199 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1200 
1201 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1202 	for (i = 0; i < HCLGE_TC_NUM; i++) {
1203 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1204 
1205 		req->tx_pkt_buff[i] =
1206 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1207 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1208 	}
1209 
1210 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1211 	if (ret)
1212 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1213 			ret);
1214 
1215 	return ret;
1216 }
1217 
1218 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1219 				 struct hclge_pkt_buf_alloc *buf_alloc)
1220 {
1221 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1222 
1223 	if (ret)
1224 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1225 
1226 	return ret;
1227 }
1228 
1229 static int hclge_get_tc_num(struct hclge_dev *hdev)
1230 {
1231 	int i, cnt = 0;
1232 
1233 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1234 		if (hdev->hw_tc_map & BIT(i))
1235 			cnt++;
1236 	return cnt;
1237 }
1238 
1239 static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
1240 {
1241 	int i, cnt = 0;
1242 
1243 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1244 		if (hdev->hw_tc_map & BIT(i) &&
1245 		    hdev->tm_info.hw_pfc_map & BIT(i))
1246 			cnt++;
1247 	return cnt;
1248 }
1249 
1250 /* Get the number of pfc enabled TCs, which have private buffer */
1251 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1252 				  struct hclge_pkt_buf_alloc *buf_alloc)
1253 {
1254 	struct hclge_priv_buf *priv;
1255 	int i, cnt = 0;
1256 
1257 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1258 		priv = &buf_alloc->priv_buf[i];
1259 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1260 		    priv->enable)
1261 			cnt++;
1262 	}
1263 
1264 	return cnt;
1265 }
1266 
1267 /* Get the number of pfc disabled TCs, which have private buffer */
1268 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1269 				     struct hclge_pkt_buf_alloc *buf_alloc)
1270 {
1271 	struct hclge_priv_buf *priv;
1272 	int i, cnt = 0;
1273 
1274 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1275 		priv = &buf_alloc->priv_buf[i];
1276 		if (hdev->hw_tc_map & BIT(i) &&
1277 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1278 		    priv->enable)
1279 			cnt++;
1280 	}
1281 
1282 	return cnt;
1283 }
1284 
1285 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1286 {
1287 	struct hclge_priv_buf *priv;
1288 	u32 rx_priv = 0;
1289 	int i;
1290 
1291 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1292 		priv = &buf_alloc->priv_buf[i];
1293 		if (priv->enable)
1294 			rx_priv += priv->buf_size;
1295 	}
1296 	return rx_priv;
1297 }
1298 
1299 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1300 {
1301 	u32 i, total_tx_size = 0;
1302 
1303 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1304 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1305 
1306 	return total_tx_size;
1307 }
1308 
1309 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1310 				struct hclge_pkt_buf_alloc *buf_alloc,
1311 				u32 rx_all)
1312 {
1313 	u32 shared_buf_min, shared_buf_tc, shared_std;
1314 	int tc_num, pfc_enable_num;
1315 	u32 shared_buf;
1316 	u32 rx_priv;
1317 	int i;
1318 
1319 	tc_num = hclge_get_tc_num(hdev);
1320 	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
1321 
1322 	if (hnae3_dev_dcb_supported(hdev))
1323 		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1324 	else
1325 		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1326 
1327 	shared_buf_tc = pfc_enable_num * hdev->mps +
1328 			(tc_num - pfc_enable_num) * hdev->mps / 2 +
1329 			hdev->mps;
1330 	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1331 
1332 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1333 	if (rx_all <= rx_priv + shared_std)
1334 		return false;
1335 
1336 	shared_buf = rx_all - rx_priv;
1337 	buf_alloc->s_buf.buf_size = shared_buf;
1338 	buf_alloc->s_buf.self.high = shared_buf;
1339 	buf_alloc->s_buf.self.low =  2 * hdev->mps;
1340 
1341 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1342 		if ((hdev->hw_tc_map & BIT(i)) &&
1343 		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
1344 			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
1345 			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1346 		} else {
1347 			buf_alloc->s_buf.tc_thrd[i].low = 0;
1348 			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
1349 		}
1350 	}
1351 
1352 	return true;
1353 }
1354 
1355 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1356 				struct hclge_pkt_buf_alloc *buf_alloc)
1357 {
1358 	u32 i, total_size;
1359 
1360 	total_size = hdev->pkt_buf_size;
1361 
1362 	/* alloc tx buffer for all enabled tc */
1363 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1364 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1365 
1366 		if (total_size < HCLGE_DEFAULT_TX_BUF)
1367 			return -ENOMEM;
1368 
1369 		if (hdev->hw_tc_map & BIT(i))
1370 			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1371 		else
1372 			priv->tx_buf_size = 0;
1373 
1374 		total_size -= priv->tx_buf_size;
1375 	}
1376 
1377 	return 0;
1378 }
1379 
1380 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1381  * @hdev: pointer to struct hclge_dev
1382  * @buf_alloc: pointer to buffer calculation data
1383  * @return: 0: calculate sucessful, negative: fail
1384  */
1385 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1386 				struct hclge_pkt_buf_alloc *buf_alloc)
1387 {
1388 #define HCLGE_BUF_SIZE_UNIT	128
1389 	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
1390 	int no_pfc_priv_num, pfc_priv_num;
1391 	struct hclge_priv_buf *priv;
1392 	int i;
1393 
1394 	aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1395 	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1396 
1397 	/* When DCB is not supported, rx private
1398 	 * buffer is not allocated.
1399 	 */
1400 	if (!hnae3_dev_dcb_supported(hdev)) {
1401 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1402 			return -ENOMEM;
1403 
1404 		return 0;
1405 	}
1406 
1407 	/* step 1, try to alloc private buffer for all enabled tc */
1408 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1409 		priv = &buf_alloc->priv_buf[i];
1410 		if (hdev->hw_tc_map & BIT(i)) {
1411 			priv->enable = 1;
1412 			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1413 				priv->wl.low = aligned_mps;
1414 				priv->wl.high = priv->wl.low + aligned_mps;
1415 				priv->buf_size = priv->wl.high +
1416 						HCLGE_DEFAULT_DV;
1417 			} else {
1418 				priv->wl.low = 0;
1419 				priv->wl.high = 2 * aligned_mps;
1420 				priv->buf_size = priv->wl.high;
1421 			}
1422 		} else {
1423 			priv->enable = 0;
1424 			priv->wl.low = 0;
1425 			priv->wl.high = 0;
1426 			priv->buf_size = 0;
1427 		}
1428 	}
1429 
1430 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1431 		return 0;
1432 
1433 	/* step 2, try to decrease the buffer size of
1434 	 * no pfc TC's private buffer
1435 	 */
1436 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1437 		priv = &buf_alloc->priv_buf[i];
1438 
1439 		priv->enable = 0;
1440 		priv->wl.low = 0;
1441 		priv->wl.high = 0;
1442 		priv->buf_size = 0;
1443 
1444 		if (!(hdev->hw_tc_map & BIT(i)))
1445 			continue;
1446 
1447 		priv->enable = 1;
1448 
1449 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1450 			priv->wl.low = 128;
1451 			priv->wl.high = priv->wl.low + aligned_mps;
1452 			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1453 		} else {
1454 			priv->wl.low = 0;
1455 			priv->wl.high = aligned_mps;
1456 			priv->buf_size = priv->wl.high;
1457 		}
1458 	}
1459 
1460 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1461 		return 0;
1462 
1463 	/* step 3, try to reduce the number of pfc disabled TCs,
1464 	 * which have private buffer
1465 	 */
1466 	/* get the total no pfc enable TC number, which have private buffer */
1467 	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1468 
1469 	/* let the last to be cleared first */
1470 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1471 		priv = &buf_alloc->priv_buf[i];
1472 
1473 		if (hdev->hw_tc_map & BIT(i) &&
1474 		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1475 			/* Clear the no pfc TC private buffer */
1476 			priv->wl.low = 0;
1477 			priv->wl.high = 0;
1478 			priv->buf_size = 0;
1479 			priv->enable = 0;
1480 			no_pfc_priv_num--;
1481 		}
1482 
1483 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1484 		    no_pfc_priv_num == 0)
1485 			break;
1486 	}
1487 
1488 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1489 		return 0;
1490 
1491 	/* step 4, try to reduce the number of pfc enabled TCs
1492 	 * which have private buffer.
1493 	 */
1494 	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1495 
1496 	/* let the last to be cleared first */
1497 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1498 		priv = &buf_alloc->priv_buf[i];
1499 
1500 		if (hdev->hw_tc_map & BIT(i) &&
1501 		    hdev->tm_info.hw_pfc_map & BIT(i)) {
1502 			/* Reduce the number of pfc TC with private buffer */
1503 			priv->wl.low = 0;
1504 			priv->enable = 0;
1505 			priv->wl.high = 0;
1506 			priv->buf_size = 0;
1507 			pfc_priv_num--;
1508 		}
1509 
1510 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1511 		    pfc_priv_num == 0)
1512 			break;
1513 	}
1514 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1515 		return 0;
1516 
1517 	return -ENOMEM;
1518 }
1519 
1520 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1521 				   struct hclge_pkt_buf_alloc *buf_alloc)
1522 {
1523 	struct hclge_rx_priv_buff_cmd *req;
1524 	struct hclge_desc desc;
1525 	int ret;
1526 	int i;
1527 
1528 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1529 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1530 
1531 	/* Alloc private buffer TCs */
1532 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1533 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1534 
1535 		req->buf_num[i] =
1536 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1537 		req->buf_num[i] |=
1538 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1539 	}
1540 
1541 	req->shared_buf =
1542 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1543 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
1544 
1545 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1546 	if (ret)
1547 		dev_err(&hdev->pdev->dev,
1548 			"rx private buffer alloc cmd failed %d\n", ret);
1549 
1550 	return ret;
1551 }
1552 
1553 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1554 				   struct hclge_pkt_buf_alloc *buf_alloc)
1555 {
1556 	struct hclge_rx_priv_wl_buf *req;
1557 	struct hclge_priv_buf *priv;
1558 	struct hclge_desc desc[2];
1559 	int i, j;
1560 	int ret;
1561 
1562 	for (i = 0; i < 2; i++) {
1563 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1564 					   false);
1565 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1566 
1567 		/* The first descriptor set the NEXT bit to 1 */
1568 		if (i == 0)
1569 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1570 		else
1571 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1572 
1573 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1574 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1575 
1576 			priv = &buf_alloc->priv_buf[idx];
1577 			req->tc_wl[j].high =
1578 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1579 			req->tc_wl[j].high |=
1580 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1581 			req->tc_wl[j].low =
1582 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1583 			req->tc_wl[j].low |=
1584 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1585 		}
1586 	}
1587 
1588 	/* Send 2 descriptor at one time */
1589 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1590 	if (ret)
1591 		dev_err(&hdev->pdev->dev,
1592 			"rx private waterline config cmd failed %d\n",
1593 			ret);
1594 	return ret;
1595 }
1596 
1597 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1598 				    struct hclge_pkt_buf_alloc *buf_alloc)
1599 {
1600 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1601 	struct hclge_rx_com_thrd *req;
1602 	struct hclge_desc desc[2];
1603 	struct hclge_tc_thrd *tc;
1604 	int i, j;
1605 	int ret;
1606 
1607 	for (i = 0; i < 2; i++) {
1608 		hclge_cmd_setup_basic_desc(&desc[i],
1609 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1610 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
1611 
1612 		/* The first descriptor set the NEXT bit to 1 */
1613 		if (i == 0)
1614 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1615 		else
1616 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1617 
1618 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1619 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1620 
1621 			req->com_thrd[j].high =
1622 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1623 			req->com_thrd[j].high |=
1624 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1625 			req->com_thrd[j].low =
1626 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1627 			req->com_thrd[j].low |=
1628 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1629 		}
1630 	}
1631 
1632 	/* Send 2 descriptors at one time */
1633 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1634 	if (ret)
1635 		dev_err(&hdev->pdev->dev,
1636 			"common threshold config cmd failed %d\n", ret);
1637 	return ret;
1638 }
1639 
1640 static int hclge_common_wl_config(struct hclge_dev *hdev,
1641 				  struct hclge_pkt_buf_alloc *buf_alloc)
1642 {
1643 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1644 	struct hclge_rx_com_wl *req;
1645 	struct hclge_desc desc;
1646 	int ret;
1647 
1648 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1649 
1650 	req = (struct hclge_rx_com_wl *)desc.data;
1651 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1652 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1653 
1654 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1655 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1656 
1657 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1658 	if (ret)
1659 		dev_err(&hdev->pdev->dev,
1660 			"common waterline config cmd failed %d\n", ret);
1661 
1662 	return ret;
1663 }
1664 
1665 int hclge_buffer_alloc(struct hclge_dev *hdev)
1666 {
1667 	struct hclge_pkt_buf_alloc *pkt_buf;
1668 	int ret;
1669 
1670 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1671 	if (!pkt_buf)
1672 		return -ENOMEM;
1673 
1674 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1675 	if (ret) {
1676 		dev_err(&hdev->pdev->dev,
1677 			"could not calc tx buffer size for all TCs %d\n", ret);
1678 		goto out;
1679 	}
1680 
1681 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1682 	if (ret) {
1683 		dev_err(&hdev->pdev->dev,
1684 			"could not alloc tx buffers %d\n", ret);
1685 		goto out;
1686 	}
1687 
1688 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1689 	if (ret) {
1690 		dev_err(&hdev->pdev->dev,
1691 			"could not calc rx priv buffer size for all TCs %d\n",
1692 			ret);
1693 		goto out;
1694 	}
1695 
1696 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1697 	if (ret) {
1698 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1699 			ret);
1700 		goto out;
1701 	}
1702 
1703 	if (hnae3_dev_dcb_supported(hdev)) {
1704 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1705 		if (ret) {
1706 			dev_err(&hdev->pdev->dev,
1707 				"could not configure rx private waterline %d\n",
1708 				ret);
1709 			goto out;
1710 		}
1711 
1712 		ret = hclge_common_thrd_config(hdev, pkt_buf);
1713 		if (ret) {
1714 			dev_err(&hdev->pdev->dev,
1715 				"could not configure common threshold %d\n",
1716 				ret);
1717 			goto out;
1718 		}
1719 	}
1720 
1721 	ret = hclge_common_wl_config(hdev, pkt_buf);
1722 	if (ret)
1723 		dev_err(&hdev->pdev->dev,
1724 			"could not configure common waterline %d\n", ret);
1725 
1726 out:
1727 	kfree(pkt_buf);
1728 	return ret;
1729 }
1730 
1731 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1732 {
1733 	struct hnae3_handle *roce = &vport->roce;
1734 	struct hnae3_handle *nic = &vport->nic;
1735 
1736 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
1737 
1738 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1739 	    vport->back->num_msi_left == 0)
1740 		return -EINVAL;
1741 
1742 	roce->rinfo.base_vector = vport->back->roce_base_vector;
1743 
1744 	roce->rinfo.netdev = nic->kinfo.netdev;
1745 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
1746 
1747 	roce->pdev = nic->pdev;
1748 	roce->ae_algo = nic->ae_algo;
1749 	roce->numa_node_mask = nic->numa_node_mask;
1750 
1751 	return 0;
1752 }
1753 
1754 static int hclge_init_msi(struct hclge_dev *hdev)
1755 {
1756 	struct pci_dev *pdev = hdev->pdev;
1757 	int vectors;
1758 	int i;
1759 
1760 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1761 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
1762 	if (vectors < 0) {
1763 		dev_err(&pdev->dev,
1764 			"failed(%d) to allocate MSI/MSI-X vectors\n",
1765 			vectors);
1766 		return vectors;
1767 	}
1768 	if (vectors < hdev->num_msi)
1769 		dev_warn(&hdev->pdev->dev,
1770 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1771 			 hdev->num_msi, vectors);
1772 
1773 	hdev->num_msi = vectors;
1774 	hdev->num_msi_left = vectors;
1775 	hdev->base_msi_vector = pdev->irq;
1776 	hdev->roce_base_vector = hdev->base_msi_vector +
1777 				hdev->roce_base_msix_offset;
1778 
1779 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1780 					   sizeof(u16), GFP_KERNEL);
1781 	if (!hdev->vector_status) {
1782 		pci_free_irq_vectors(pdev);
1783 		return -ENOMEM;
1784 	}
1785 
1786 	for (i = 0; i < hdev->num_msi; i++)
1787 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1788 
1789 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1790 					sizeof(int), GFP_KERNEL);
1791 	if (!hdev->vector_irq) {
1792 		pci_free_irq_vectors(pdev);
1793 		return -ENOMEM;
1794 	}
1795 
1796 	return 0;
1797 }
1798 
1799 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1800 {
1801 
1802 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1803 		duplex = HCLGE_MAC_FULL;
1804 
1805 	return duplex;
1806 }
1807 
1808 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1809 				      u8 duplex)
1810 {
1811 	struct hclge_config_mac_speed_dup_cmd *req;
1812 	struct hclge_desc desc;
1813 	int ret;
1814 
1815 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
1816 
1817 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1818 
1819 	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1820 
1821 	switch (speed) {
1822 	case HCLGE_MAC_SPEED_10M:
1823 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1824 				HCLGE_CFG_SPEED_S, 6);
1825 		break;
1826 	case HCLGE_MAC_SPEED_100M:
1827 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1828 				HCLGE_CFG_SPEED_S, 7);
1829 		break;
1830 	case HCLGE_MAC_SPEED_1G:
1831 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1832 				HCLGE_CFG_SPEED_S, 0);
1833 		break;
1834 	case HCLGE_MAC_SPEED_10G:
1835 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1836 				HCLGE_CFG_SPEED_S, 1);
1837 		break;
1838 	case HCLGE_MAC_SPEED_25G:
1839 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1840 				HCLGE_CFG_SPEED_S, 2);
1841 		break;
1842 	case HCLGE_MAC_SPEED_40G:
1843 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1844 				HCLGE_CFG_SPEED_S, 3);
1845 		break;
1846 	case HCLGE_MAC_SPEED_50G:
1847 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1848 				HCLGE_CFG_SPEED_S, 4);
1849 		break;
1850 	case HCLGE_MAC_SPEED_100G:
1851 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1852 				HCLGE_CFG_SPEED_S, 5);
1853 		break;
1854 	default:
1855 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1856 		return -EINVAL;
1857 	}
1858 
1859 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1860 		      1);
1861 
1862 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1863 	if (ret) {
1864 		dev_err(&hdev->pdev->dev,
1865 			"mac speed/duplex config cmd failed %d.\n", ret);
1866 		return ret;
1867 	}
1868 
1869 	return 0;
1870 }
1871 
1872 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1873 {
1874 	int ret;
1875 
1876 	duplex = hclge_check_speed_dup(duplex, speed);
1877 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
1878 		return 0;
1879 
1880 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
1881 	if (ret)
1882 		return ret;
1883 
1884 	hdev->hw.mac.speed = speed;
1885 	hdev->hw.mac.duplex = duplex;
1886 
1887 	return 0;
1888 }
1889 
1890 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
1891 				     u8 duplex)
1892 {
1893 	struct hclge_vport *vport = hclge_get_vport(handle);
1894 	struct hclge_dev *hdev = vport->back;
1895 
1896 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
1897 }
1898 
1899 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
1900 					u8 *duplex)
1901 {
1902 	struct hclge_query_an_speed_dup_cmd *req;
1903 	struct hclge_desc desc;
1904 	int speed_tmp;
1905 	int ret;
1906 
1907 	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
1908 
1909 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
1910 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1911 	if (ret) {
1912 		dev_err(&hdev->pdev->dev,
1913 			"mac speed/autoneg/duplex query cmd failed %d\n",
1914 			ret);
1915 		return ret;
1916 	}
1917 
1918 	*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
1919 	speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
1920 				    HCLGE_QUERY_SPEED_S);
1921 
1922 	ret = hclge_parse_speed(speed_tmp, speed);
1923 	if (ret)
1924 		dev_err(&hdev->pdev->dev,
1925 			"could not parse speed(=%d), %d\n", speed_tmp, ret);
1926 
1927 	return ret;
1928 }
1929 
1930 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
1931 {
1932 	struct hclge_config_auto_neg_cmd *req;
1933 	struct hclge_desc desc;
1934 	u32 flag = 0;
1935 	int ret;
1936 
1937 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
1938 
1939 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
1940 	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
1941 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
1942 
1943 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1944 	if (ret)
1945 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
1946 			ret);
1947 
1948 	return ret;
1949 }
1950 
1951 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
1952 {
1953 	struct hclge_vport *vport = hclge_get_vport(handle);
1954 	struct hclge_dev *hdev = vport->back;
1955 
1956 	return hclge_set_autoneg_en(hdev, enable);
1957 }
1958 
1959 static int hclge_get_autoneg(struct hnae3_handle *handle)
1960 {
1961 	struct hclge_vport *vport = hclge_get_vport(handle);
1962 	struct hclge_dev *hdev = vport->back;
1963 	struct phy_device *phydev = hdev->hw.mac.phydev;
1964 
1965 	if (phydev)
1966 		return phydev->autoneg;
1967 
1968 	return hdev->hw.mac.autoneg;
1969 }
1970 
1971 static int hclge_mac_init(struct hclge_dev *hdev)
1972 {
1973 	struct hclge_mac *mac = &hdev->hw.mac;
1974 	int ret;
1975 
1976 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
1977 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
1978 					 hdev->hw.mac.duplex);
1979 	if (ret) {
1980 		dev_err(&hdev->pdev->dev,
1981 			"Config mac speed dup fail ret=%d\n", ret);
1982 		return ret;
1983 	}
1984 
1985 	mac->link = 0;
1986 
1987 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
1988 	if (ret) {
1989 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
1990 		return ret;
1991 	}
1992 
1993 	ret = hclge_buffer_alloc(hdev);
1994 	if (ret)
1995 		dev_err(&hdev->pdev->dev,
1996 			"allocate buffer fail, ret=%d\n", ret);
1997 
1998 	return ret;
1999 }
2000 
2001 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2002 {
2003 	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2004 		schedule_work(&hdev->mbx_service_task);
2005 }
2006 
2007 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2008 {
2009 	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2010 		schedule_work(&hdev->rst_service_task);
2011 }
2012 
2013 static void hclge_task_schedule(struct hclge_dev *hdev)
2014 {
2015 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2016 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2017 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2018 		(void)schedule_work(&hdev->service_task);
2019 }
2020 
2021 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2022 {
2023 	struct hclge_link_status_cmd *req;
2024 	struct hclge_desc desc;
2025 	int link_status;
2026 	int ret;
2027 
2028 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2029 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2030 	if (ret) {
2031 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2032 			ret);
2033 		return ret;
2034 	}
2035 
2036 	req = (struct hclge_link_status_cmd *)desc.data;
2037 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2038 
2039 	return !!link_status;
2040 }
2041 
2042 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2043 {
2044 	int mac_state;
2045 	int link_stat;
2046 
2047 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2048 		return 0;
2049 
2050 	mac_state = hclge_get_mac_link_status(hdev);
2051 
2052 	if (hdev->hw.mac.phydev) {
2053 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2054 			link_stat = mac_state &
2055 				hdev->hw.mac.phydev->link;
2056 		else
2057 			link_stat = 0;
2058 
2059 	} else {
2060 		link_stat = mac_state;
2061 	}
2062 
2063 	return !!link_stat;
2064 }
2065 
2066 static void hclge_update_link_status(struct hclge_dev *hdev)
2067 {
2068 	struct hnae3_client *client = hdev->nic_client;
2069 	struct hnae3_handle *handle;
2070 	int state;
2071 	int i;
2072 
2073 	if (!client)
2074 		return;
2075 	state = hclge_get_mac_phy_link(hdev);
2076 	if (state != hdev->hw.mac.link) {
2077 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2078 			handle = &hdev->vport[i].nic;
2079 			client->ops->link_status_change(handle, state);
2080 		}
2081 		hdev->hw.mac.link = state;
2082 	}
2083 }
2084 
2085 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2086 {
2087 	struct hclge_mac mac = hdev->hw.mac;
2088 	u8 duplex;
2089 	int speed;
2090 	int ret;
2091 
2092 	/* get the speed and duplex as autoneg'result from mac cmd when phy
2093 	 * doesn't exit.
2094 	 */
2095 	if (mac.phydev || !mac.autoneg)
2096 		return 0;
2097 
2098 	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2099 	if (ret) {
2100 		dev_err(&hdev->pdev->dev,
2101 			"mac autoneg/speed/duplex query failed %d\n", ret);
2102 		return ret;
2103 	}
2104 
2105 	ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2106 	if (ret) {
2107 		dev_err(&hdev->pdev->dev,
2108 			"mac speed/duplex config failed %d\n", ret);
2109 		return ret;
2110 	}
2111 
2112 	return 0;
2113 }
2114 
2115 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2116 {
2117 	struct hclge_vport *vport = hclge_get_vport(handle);
2118 	struct hclge_dev *hdev = vport->back;
2119 
2120 	return hclge_update_speed_duplex(hdev);
2121 }
2122 
2123 static int hclge_get_status(struct hnae3_handle *handle)
2124 {
2125 	struct hclge_vport *vport = hclge_get_vport(handle);
2126 	struct hclge_dev *hdev = vport->back;
2127 
2128 	hclge_update_link_status(hdev);
2129 
2130 	return hdev->hw.mac.link;
2131 }
2132 
2133 static void hclge_service_timer(struct timer_list *t)
2134 {
2135 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2136 
2137 	mod_timer(&hdev->service_timer, jiffies + HZ);
2138 	hdev->hw_stats.stats_timer++;
2139 	hclge_task_schedule(hdev);
2140 }
2141 
2142 static void hclge_service_complete(struct hclge_dev *hdev)
2143 {
2144 	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2145 
2146 	/* Flush memory before next watchdog */
2147 	smp_mb__before_atomic();
2148 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2149 }
2150 
2151 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2152 {
2153 	u32 rst_src_reg;
2154 	u32 cmdq_src_reg;
2155 
2156 	/* fetch the events from their corresponding regs */
2157 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2158 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2159 
2160 	/* Assumption: If by any chance reset and mailbox events are reported
2161 	 * together then we will only process reset event in this go and will
2162 	 * defer the processing of the mailbox events. Since, we would have not
2163 	 * cleared RX CMDQ event this time we would receive again another
2164 	 * interrupt from H/W just for the mailbox.
2165 	 */
2166 
2167 	/* check for vector0 reset event sources */
2168 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2169 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2170 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2171 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2172 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2173 		return HCLGE_VECTOR0_EVENT_RST;
2174 	}
2175 
2176 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2177 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2178 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2179 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2180 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2181 		return HCLGE_VECTOR0_EVENT_RST;
2182 	}
2183 
2184 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2185 		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2186 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2187 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2188 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2189 		return HCLGE_VECTOR0_EVENT_RST;
2190 	}
2191 
2192 	/* check for vector0 mailbox(=CMDQ RX) event source */
2193 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2194 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2195 		*clearval = cmdq_src_reg;
2196 		return HCLGE_VECTOR0_EVENT_MBX;
2197 	}
2198 
2199 	return HCLGE_VECTOR0_EVENT_OTHER;
2200 }
2201 
2202 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2203 				    u32 regclr)
2204 {
2205 	switch (event_type) {
2206 	case HCLGE_VECTOR0_EVENT_RST:
2207 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2208 		break;
2209 	case HCLGE_VECTOR0_EVENT_MBX:
2210 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2211 		break;
2212 	default:
2213 		break;
2214 	}
2215 }
2216 
2217 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2218 {
2219 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2220 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2221 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2222 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2223 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2224 }
2225 
2226 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2227 {
2228 	writel(enable ? 1 : 0, vector->addr);
2229 }
2230 
2231 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2232 {
2233 	struct hclge_dev *hdev = data;
2234 	u32 event_cause;
2235 	u32 clearval;
2236 
2237 	hclge_enable_vector(&hdev->misc_vector, false);
2238 	event_cause = hclge_check_event_cause(hdev, &clearval);
2239 
2240 	/* vector 0 interrupt is shared with reset and mailbox source events.*/
2241 	switch (event_cause) {
2242 	case HCLGE_VECTOR0_EVENT_RST:
2243 		hclge_reset_task_schedule(hdev);
2244 		break;
2245 	case HCLGE_VECTOR0_EVENT_MBX:
2246 		/* If we are here then,
2247 		 * 1. Either we are not handling any mbx task and we are not
2248 		 *    scheduled as well
2249 		 *                        OR
2250 		 * 2. We could be handling a mbx task but nothing more is
2251 		 *    scheduled.
2252 		 * In both cases, we should schedule mbx task as there are more
2253 		 * mbx messages reported by this interrupt.
2254 		 */
2255 		hclge_mbx_task_schedule(hdev);
2256 		break;
2257 	default:
2258 		dev_warn(&hdev->pdev->dev,
2259 			 "received unknown or unhandled event of vector0\n");
2260 		break;
2261 	}
2262 
2263 	/* clear the source of interrupt if it is not cause by reset */
2264 	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2265 		hclge_clear_event_cause(hdev, event_cause, clearval);
2266 		hclge_enable_vector(&hdev->misc_vector, true);
2267 	}
2268 
2269 	return IRQ_HANDLED;
2270 }
2271 
2272 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2273 {
2274 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2275 		dev_warn(&hdev->pdev->dev,
2276 			 "vector(vector_id %d) has been freed.\n", vector_id);
2277 		return;
2278 	}
2279 
2280 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2281 	hdev->num_msi_left += 1;
2282 	hdev->num_msi_used -= 1;
2283 }
2284 
2285 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2286 {
2287 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2288 
2289 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2290 
2291 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2292 	hdev->vector_status[0] = 0;
2293 
2294 	hdev->num_msi_left -= 1;
2295 	hdev->num_msi_used += 1;
2296 }
2297 
2298 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2299 {
2300 	int ret;
2301 
2302 	hclge_get_misc_vector(hdev);
2303 
2304 	/* this would be explicitly freed in the end */
2305 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2306 			  0, "hclge_misc", hdev);
2307 	if (ret) {
2308 		hclge_free_vector(hdev, 0);
2309 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2310 			hdev->misc_vector.vector_irq);
2311 	}
2312 
2313 	return ret;
2314 }
2315 
2316 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2317 {
2318 	free_irq(hdev->misc_vector.vector_irq, hdev);
2319 	hclge_free_vector(hdev, 0);
2320 }
2321 
2322 static int hclge_notify_client(struct hclge_dev *hdev,
2323 			       enum hnae3_reset_notify_type type)
2324 {
2325 	struct hnae3_client *client = hdev->nic_client;
2326 	u16 i;
2327 
2328 	if (!client->ops->reset_notify)
2329 		return -EOPNOTSUPP;
2330 
2331 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2332 		struct hnae3_handle *handle = &hdev->vport[i].nic;
2333 		int ret;
2334 
2335 		ret = client->ops->reset_notify(handle, type);
2336 		if (ret) {
2337 			dev_err(&hdev->pdev->dev,
2338 				"notify nic client failed %d(%d)\n", type, ret);
2339 			return ret;
2340 		}
2341 	}
2342 
2343 	return 0;
2344 }
2345 
2346 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2347 				    enum hnae3_reset_notify_type type)
2348 {
2349 	struct hnae3_client *client = hdev->roce_client;
2350 	int ret = 0;
2351 	u16 i;
2352 
2353 	if (!client)
2354 		return 0;
2355 
2356 	if (!client->ops->reset_notify)
2357 		return -EOPNOTSUPP;
2358 
2359 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2360 		struct hnae3_handle *handle = &hdev->vport[i].roce;
2361 
2362 		ret = client->ops->reset_notify(handle, type);
2363 		if (ret) {
2364 			dev_err(&hdev->pdev->dev,
2365 				"notify roce client failed %d(%d)",
2366 				type, ret);
2367 			return ret;
2368 		}
2369 	}
2370 
2371 	return ret;
2372 }
2373 
2374 static int hclge_reset_wait(struct hclge_dev *hdev)
2375 {
2376 #define HCLGE_RESET_WATI_MS	100
2377 #define HCLGE_RESET_WAIT_CNT	200
2378 	u32 val, reg, reg_bit;
2379 	u32 cnt = 0;
2380 
2381 	switch (hdev->reset_type) {
2382 	case HNAE3_IMP_RESET:
2383 		reg = HCLGE_GLOBAL_RESET_REG;
2384 		reg_bit = HCLGE_IMP_RESET_BIT;
2385 		break;
2386 	case HNAE3_GLOBAL_RESET:
2387 		reg = HCLGE_GLOBAL_RESET_REG;
2388 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
2389 		break;
2390 	case HNAE3_CORE_RESET:
2391 		reg = HCLGE_GLOBAL_RESET_REG;
2392 		reg_bit = HCLGE_CORE_RESET_BIT;
2393 		break;
2394 	case HNAE3_FUNC_RESET:
2395 		reg = HCLGE_FUN_RST_ING;
2396 		reg_bit = HCLGE_FUN_RST_ING_B;
2397 		break;
2398 	case HNAE3_FLR_RESET:
2399 		break;
2400 	default:
2401 		dev_err(&hdev->pdev->dev,
2402 			"Wait for unsupported reset type: %d\n",
2403 			hdev->reset_type);
2404 		return -EINVAL;
2405 	}
2406 
2407 	if (hdev->reset_type == HNAE3_FLR_RESET) {
2408 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2409 		       cnt++ < HCLGE_RESET_WAIT_CNT)
2410 			msleep(HCLGE_RESET_WATI_MS);
2411 
2412 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2413 			dev_err(&hdev->pdev->dev,
2414 				"flr wait timeout: %d\n", cnt);
2415 			return -EBUSY;
2416 		}
2417 
2418 		return 0;
2419 	}
2420 
2421 	val = hclge_read_dev(&hdev->hw, reg);
2422 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2423 		msleep(HCLGE_RESET_WATI_MS);
2424 		val = hclge_read_dev(&hdev->hw, reg);
2425 		cnt++;
2426 	}
2427 
2428 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
2429 		dev_warn(&hdev->pdev->dev,
2430 			 "Wait for reset timeout: %d\n", hdev->reset_type);
2431 		return -EBUSY;
2432 	}
2433 
2434 	return 0;
2435 }
2436 
2437 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2438 {
2439 	struct hclge_vf_rst_cmd *req;
2440 	struct hclge_desc desc;
2441 
2442 	req = (struct hclge_vf_rst_cmd *)desc.data;
2443 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2444 	req->dest_vfid = func_id;
2445 
2446 	if (reset)
2447 		req->vf_rst = 0x1;
2448 
2449 	return hclge_cmd_send(&hdev->hw, &desc, 1);
2450 }
2451 
2452 int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2453 {
2454 	int i;
2455 
2456 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2457 		struct hclge_vport *vport = &hdev->vport[i];
2458 		int ret;
2459 
2460 		/* Send cmd to set/clear VF's FUNC_RST_ING */
2461 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2462 		if (ret) {
2463 			dev_err(&hdev->pdev->dev,
2464 				"set vf(%d) rst failed %d!\n",
2465 				vport->vport_id, ret);
2466 			return ret;
2467 		}
2468 
2469 		if (!reset)
2470 			continue;
2471 
2472 		/* Inform VF to process the reset.
2473 		 * hclge_inform_reset_assert_to_vf may fail if VF
2474 		 * driver is not loaded.
2475 		 */
2476 		ret = hclge_inform_reset_assert_to_vf(vport);
2477 		if (ret)
2478 			dev_warn(&hdev->pdev->dev,
2479 				 "inform reset to vf(%d) failed %d!\n",
2480 				 vport->vport_id, ret);
2481 	}
2482 
2483 	return 0;
2484 }
2485 
2486 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2487 {
2488 	struct hclge_desc desc;
2489 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2490 	int ret;
2491 
2492 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2493 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2494 	req->fun_reset_vfid = func_id;
2495 
2496 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2497 	if (ret)
2498 		dev_err(&hdev->pdev->dev,
2499 			"send function reset cmd fail, status =%d\n", ret);
2500 
2501 	return ret;
2502 }
2503 
2504 static void hclge_do_reset(struct hclge_dev *hdev)
2505 {
2506 	struct pci_dev *pdev = hdev->pdev;
2507 	u32 val;
2508 
2509 	switch (hdev->reset_type) {
2510 	case HNAE3_GLOBAL_RESET:
2511 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2512 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2513 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2514 		dev_info(&pdev->dev, "Global Reset requested\n");
2515 		break;
2516 	case HNAE3_CORE_RESET:
2517 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2518 		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2519 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2520 		dev_info(&pdev->dev, "Core Reset requested\n");
2521 		break;
2522 	case HNAE3_FUNC_RESET:
2523 		dev_info(&pdev->dev, "PF Reset requested\n");
2524 		/* schedule again to check later */
2525 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2526 		hclge_reset_task_schedule(hdev);
2527 		break;
2528 	case HNAE3_FLR_RESET:
2529 		dev_info(&pdev->dev, "FLR requested\n");
2530 		/* schedule again to check later */
2531 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2532 		hclge_reset_task_schedule(hdev);
2533 		break;
2534 	default:
2535 		dev_warn(&pdev->dev,
2536 			 "Unsupported reset type: %d\n", hdev->reset_type);
2537 		break;
2538 	}
2539 }
2540 
2541 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2542 						   unsigned long *addr)
2543 {
2544 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2545 
2546 	/* return the highest priority reset level amongst all */
2547 	if (test_bit(HNAE3_IMP_RESET, addr)) {
2548 		rst_level = HNAE3_IMP_RESET;
2549 		clear_bit(HNAE3_IMP_RESET, addr);
2550 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2551 		clear_bit(HNAE3_CORE_RESET, addr);
2552 		clear_bit(HNAE3_FUNC_RESET, addr);
2553 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2554 		rst_level = HNAE3_GLOBAL_RESET;
2555 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2556 		clear_bit(HNAE3_CORE_RESET, addr);
2557 		clear_bit(HNAE3_FUNC_RESET, addr);
2558 	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
2559 		rst_level = HNAE3_CORE_RESET;
2560 		clear_bit(HNAE3_CORE_RESET, addr);
2561 		clear_bit(HNAE3_FUNC_RESET, addr);
2562 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2563 		rst_level = HNAE3_FUNC_RESET;
2564 		clear_bit(HNAE3_FUNC_RESET, addr);
2565 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
2566 		rst_level = HNAE3_FLR_RESET;
2567 		clear_bit(HNAE3_FLR_RESET, addr);
2568 	}
2569 
2570 	return rst_level;
2571 }
2572 
2573 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2574 {
2575 	u32 clearval = 0;
2576 
2577 	switch (hdev->reset_type) {
2578 	case HNAE3_IMP_RESET:
2579 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2580 		break;
2581 	case HNAE3_GLOBAL_RESET:
2582 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2583 		break;
2584 	case HNAE3_CORE_RESET:
2585 		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2586 		break;
2587 	default:
2588 		break;
2589 	}
2590 
2591 	if (!clearval)
2592 		return;
2593 
2594 	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2595 	hclge_enable_vector(&hdev->misc_vector, true);
2596 }
2597 
2598 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2599 {
2600 	int ret = 0;
2601 
2602 	switch (hdev->reset_type) {
2603 	case HNAE3_FUNC_RESET:
2604 		/* fall through */
2605 	case HNAE3_FLR_RESET:
2606 		ret = hclge_set_all_vf_rst(hdev, true);
2607 		break;
2608 	default:
2609 		break;
2610 	}
2611 
2612 	return ret;
2613 }
2614 
2615 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2616 {
2617 	u32 reg_val;
2618 	int ret = 0;
2619 
2620 	switch (hdev->reset_type) {
2621 	case HNAE3_FUNC_RESET:
2622 		/* There is no mechanism for PF to know if VF has stopped IO
2623 		 * for now, just wait 100 ms for VF to stop IO
2624 		 */
2625 		msleep(100);
2626 		ret = hclge_func_reset_cmd(hdev, 0);
2627 		if (ret) {
2628 			dev_err(&hdev->pdev->dev,
2629 				"asserting function reset fail %d!\n", ret);
2630 			return ret;
2631 		}
2632 
2633 		/* After performaning pf reset, it is not necessary to do the
2634 		 * mailbox handling or send any command to firmware, because
2635 		 * any mailbox handling or command to firmware is only valid
2636 		 * after hclge_cmd_init is called.
2637 		 */
2638 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2639 		break;
2640 	case HNAE3_FLR_RESET:
2641 		/* There is no mechanism for PF to know if VF has stopped IO
2642 		 * for now, just wait 100 ms for VF to stop IO
2643 		 */
2644 		msleep(100);
2645 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2646 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2647 		break;
2648 	case HNAE3_IMP_RESET:
2649 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2650 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2651 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2652 		break;
2653 	default:
2654 		break;
2655 	}
2656 
2657 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2658 
2659 	return ret;
2660 }
2661 
2662 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2663 {
2664 #define MAX_RESET_FAIL_CNT 5
2665 #define RESET_UPGRADE_DELAY_SEC 10
2666 
2667 	if (hdev->reset_pending) {
2668 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2669 			 hdev->reset_pending);
2670 		return true;
2671 	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2672 		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2673 		    BIT(HCLGE_IMP_RESET_BIT))) {
2674 		dev_info(&hdev->pdev->dev,
2675 			 "reset failed because IMP Reset is pending\n");
2676 		hclge_clear_reset_cause(hdev);
2677 		return false;
2678 	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2679 		hdev->reset_fail_cnt++;
2680 		if (is_timeout) {
2681 			set_bit(hdev->reset_type, &hdev->reset_pending);
2682 			dev_info(&hdev->pdev->dev,
2683 				 "re-schedule to wait for hw reset done\n");
2684 			return true;
2685 		}
2686 
2687 		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2688 		hclge_clear_reset_cause(hdev);
2689 		mod_timer(&hdev->reset_timer,
2690 			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2691 
2692 		return false;
2693 	}
2694 
2695 	hclge_clear_reset_cause(hdev);
2696 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
2697 	return false;
2698 }
2699 
2700 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2701 {
2702 	int ret = 0;
2703 
2704 	switch (hdev->reset_type) {
2705 	case HNAE3_FUNC_RESET:
2706 		/* fall through */
2707 	case HNAE3_FLR_RESET:
2708 		ret = hclge_set_all_vf_rst(hdev, false);
2709 		break;
2710 	default:
2711 		break;
2712 	}
2713 
2714 	return ret;
2715 }
2716 
2717 static void hclge_reset(struct hclge_dev *hdev)
2718 {
2719 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2720 	bool is_timeout = false;
2721 	int ret;
2722 
2723 	/* Initialize ae_dev reset status as well, in case enet layer wants to
2724 	 * know if device is undergoing reset
2725 	 */
2726 	ae_dev->reset_type = hdev->reset_type;
2727 	hdev->reset_count++;
2728 	hdev->last_reset_time = jiffies;
2729 	/* perform reset of the stack & ae device for a client */
2730 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2731 	if (ret)
2732 		goto err_reset;
2733 
2734 	ret = hclge_reset_prepare_down(hdev);
2735 	if (ret)
2736 		goto err_reset;
2737 
2738 	rtnl_lock();
2739 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2740 	if (ret)
2741 		goto err_reset_lock;
2742 
2743 	rtnl_unlock();
2744 
2745 	ret = hclge_reset_prepare_wait(hdev);
2746 	if (ret)
2747 		goto err_reset;
2748 
2749 	if (hclge_reset_wait(hdev)) {
2750 		is_timeout = true;
2751 		goto err_reset;
2752 	}
2753 
2754 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2755 	if (ret)
2756 		goto err_reset;
2757 
2758 	rtnl_lock();
2759 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2760 	if (ret)
2761 		goto err_reset_lock;
2762 
2763 	ret = hclge_reset_ae_dev(hdev->ae_dev);
2764 	if (ret)
2765 		goto err_reset_lock;
2766 
2767 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2768 	if (ret)
2769 		goto err_reset_lock;
2770 
2771 	hclge_clear_reset_cause(hdev);
2772 
2773 	ret = hclge_reset_prepare_up(hdev);
2774 	if (ret)
2775 		goto err_reset_lock;
2776 
2777 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2778 	if (ret)
2779 		goto err_reset_lock;
2780 
2781 	rtnl_unlock();
2782 
2783 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2784 	if (ret)
2785 		goto err_reset;
2786 
2787 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2788 	if (ret)
2789 		goto err_reset;
2790 
2791 	return;
2792 
2793 err_reset_lock:
2794 	rtnl_unlock();
2795 err_reset:
2796 	if (hclge_reset_err_handle(hdev, is_timeout))
2797 		hclge_reset_task_schedule(hdev);
2798 }
2799 
2800 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
2801 {
2802 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2803 	struct hclge_dev *hdev = ae_dev->priv;
2804 
2805 	/* We might end up getting called broadly because of 2 below cases:
2806 	 * 1. Recoverable error was conveyed through APEI and only way to bring
2807 	 *    normalcy is to reset.
2808 	 * 2. A new reset request from the stack due to timeout
2809 	 *
2810 	 * For the first case,error event might not have ae handle available.
2811 	 * check if this is a new reset request and we are not here just because
2812 	 * last reset attempt did not succeed and watchdog hit us again. We will
2813 	 * know this if last reset request did not occur very recently (watchdog
2814 	 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
2815 	 * In case of new request we reset the "reset level" to PF reset.
2816 	 * And if it is a repeat reset request of the most recent one then we
2817 	 * want to make sure we throttle the reset request. Therefore, we will
2818 	 * not allow it again before 3*HZ times.
2819 	 */
2820 	if (!handle)
2821 		handle = &hdev->vport[0].nic;
2822 
2823 	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
2824 		return;
2825 	else if (hdev->default_reset_request)
2826 		hdev->reset_level =
2827 			hclge_get_reset_level(hdev,
2828 					      &hdev->default_reset_request);
2829 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
2830 		hdev->reset_level = HNAE3_FUNC_RESET;
2831 
2832 	dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
2833 		 hdev->reset_level);
2834 
2835 	/* request reset & schedule reset task */
2836 	set_bit(hdev->reset_level, &hdev->reset_request);
2837 	hclge_reset_task_schedule(hdev);
2838 
2839 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
2840 		hdev->reset_level++;
2841 }
2842 
2843 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
2844 					enum hnae3_reset_type rst_type)
2845 {
2846 	struct hclge_dev *hdev = ae_dev->priv;
2847 
2848 	set_bit(rst_type, &hdev->default_reset_request);
2849 }
2850 
2851 static void hclge_reset_timer(struct timer_list *t)
2852 {
2853 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
2854 
2855 	dev_info(&hdev->pdev->dev,
2856 		 "triggering global reset in reset timer\n");
2857 	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
2858 	hclge_reset_event(hdev->pdev, NULL);
2859 }
2860 
2861 static void hclge_reset_subtask(struct hclge_dev *hdev)
2862 {
2863 	/* check if there is any ongoing reset in the hardware. This status can
2864 	 * be checked from reset_pending. If there is then, we need to wait for
2865 	 * hardware to complete reset.
2866 	 *    a. If we are able to figure out in reasonable time that hardware
2867 	 *       has fully resetted then, we can proceed with driver, client
2868 	 *       reset.
2869 	 *    b. else, we can come back later to check this status so re-sched
2870 	 *       now.
2871 	 */
2872 	hdev->last_reset_time = jiffies;
2873 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2874 	if (hdev->reset_type != HNAE3_NONE_RESET)
2875 		hclge_reset(hdev);
2876 
2877 	/* check if we got any *new* reset requests to be honored */
2878 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2879 	if (hdev->reset_type != HNAE3_NONE_RESET)
2880 		hclge_do_reset(hdev);
2881 
2882 	hdev->reset_type = HNAE3_NONE_RESET;
2883 }
2884 
2885 static void hclge_reset_service_task(struct work_struct *work)
2886 {
2887 	struct hclge_dev *hdev =
2888 		container_of(work, struct hclge_dev, rst_service_task);
2889 
2890 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2891 		return;
2892 
2893 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2894 
2895 	hclge_reset_subtask(hdev);
2896 
2897 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2898 }
2899 
2900 static void hclge_mailbox_service_task(struct work_struct *work)
2901 {
2902 	struct hclge_dev *hdev =
2903 		container_of(work, struct hclge_dev, mbx_service_task);
2904 
2905 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2906 		return;
2907 
2908 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2909 
2910 	hclge_mbx_handler(hdev);
2911 
2912 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2913 }
2914 
2915 static void hclge_update_vport_alive(struct hclge_dev *hdev)
2916 {
2917 	int i;
2918 
2919 	/* start from vport 1 for PF is always alive */
2920 	for (i = 1; i < hdev->num_alloc_vport; i++) {
2921 		struct hclge_vport *vport = &hdev->vport[i];
2922 
2923 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
2924 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
2925 
2926 		/* If vf is not alive, set to default value */
2927 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2928 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
2929 	}
2930 }
2931 
2932 static void hclge_service_task(struct work_struct *work)
2933 {
2934 	struct hclge_dev *hdev =
2935 		container_of(work, struct hclge_dev, service_task);
2936 
2937 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2938 		hclge_update_stats_for_all(hdev);
2939 		hdev->hw_stats.stats_timer = 0;
2940 	}
2941 
2942 	hclge_update_speed_duplex(hdev);
2943 	hclge_update_link_status(hdev);
2944 	hclge_update_vport_alive(hdev);
2945 	hclge_service_complete(hdev);
2946 }
2947 
2948 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2949 {
2950 	/* VF handle has no client */
2951 	if (!handle->client)
2952 		return container_of(handle, struct hclge_vport, nic);
2953 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
2954 		return container_of(handle, struct hclge_vport, roce);
2955 	else
2956 		return container_of(handle, struct hclge_vport, nic);
2957 }
2958 
2959 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2960 			    struct hnae3_vector_info *vector_info)
2961 {
2962 	struct hclge_vport *vport = hclge_get_vport(handle);
2963 	struct hnae3_vector_info *vector = vector_info;
2964 	struct hclge_dev *hdev = vport->back;
2965 	int alloc = 0;
2966 	int i, j;
2967 
2968 	vector_num = min(hdev->num_msi_left, vector_num);
2969 
2970 	for (j = 0; j < vector_num; j++) {
2971 		for (i = 1; i < hdev->num_msi; i++) {
2972 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2973 				vector->vector = pci_irq_vector(hdev->pdev, i);
2974 				vector->io_addr = hdev->hw.io_base +
2975 					HCLGE_VECTOR_REG_BASE +
2976 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
2977 					vport->vport_id *
2978 					HCLGE_VECTOR_VF_OFFSET;
2979 				hdev->vector_status[i] = vport->vport_id;
2980 				hdev->vector_irq[i] = vector->vector;
2981 
2982 				vector++;
2983 				alloc++;
2984 
2985 				break;
2986 			}
2987 		}
2988 	}
2989 	hdev->num_msi_left -= alloc;
2990 	hdev->num_msi_used += alloc;
2991 
2992 	return alloc;
2993 }
2994 
2995 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2996 {
2997 	int i;
2998 
2999 	for (i = 0; i < hdev->num_msi; i++)
3000 		if (vector == hdev->vector_irq[i])
3001 			return i;
3002 
3003 	return -EINVAL;
3004 }
3005 
3006 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3007 {
3008 	struct hclge_vport *vport = hclge_get_vport(handle);
3009 	struct hclge_dev *hdev = vport->back;
3010 	int vector_id;
3011 
3012 	vector_id = hclge_get_vector_index(hdev, vector);
3013 	if (vector_id < 0) {
3014 		dev_err(&hdev->pdev->dev,
3015 			"Get vector index fail. vector_id =%d\n", vector_id);
3016 		return vector_id;
3017 	}
3018 
3019 	hclge_free_vector(hdev, vector_id);
3020 
3021 	return 0;
3022 }
3023 
3024 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3025 {
3026 	return HCLGE_RSS_KEY_SIZE;
3027 }
3028 
3029 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3030 {
3031 	return HCLGE_RSS_IND_TBL_SIZE;
3032 }
3033 
3034 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3035 				  const u8 hfunc, const u8 *key)
3036 {
3037 	struct hclge_rss_config_cmd *req;
3038 	struct hclge_desc desc;
3039 	int key_offset;
3040 	int key_size;
3041 	int ret;
3042 
3043 	req = (struct hclge_rss_config_cmd *)desc.data;
3044 
3045 	for (key_offset = 0; key_offset < 3; key_offset++) {
3046 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3047 					   false);
3048 
3049 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3050 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3051 
3052 		if (key_offset == 2)
3053 			key_size =
3054 			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3055 		else
3056 			key_size = HCLGE_RSS_HASH_KEY_NUM;
3057 
3058 		memcpy(req->hash_key,
3059 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3060 
3061 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3062 		if (ret) {
3063 			dev_err(&hdev->pdev->dev,
3064 				"Configure RSS config fail, status = %d\n",
3065 				ret);
3066 			return ret;
3067 		}
3068 	}
3069 	return 0;
3070 }
3071 
3072 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3073 {
3074 	struct hclge_rss_indirection_table_cmd *req;
3075 	struct hclge_desc desc;
3076 	int i, j;
3077 	int ret;
3078 
3079 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3080 
3081 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3082 		hclge_cmd_setup_basic_desc
3083 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3084 
3085 		req->start_table_index =
3086 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3087 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3088 
3089 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3090 			req->rss_result[j] =
3091 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3092 
3093 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3094 		if (ret) {
3095 			dev_err(&hdev->pdev->dev,
3096 				"Configure rss indir table fail,status = %d\n",
3097 				ret);
3098 			return ret;
3099 		}
3100 	}
3101 	return 0;
3102 }
3103 
3104 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3105 				 u16 *tc_size, u16 *tc_offset)
3106 {
3107 	struct hclge_rss_tc_mode_cmd *req;
3108 	struct hclge_desc desc;
3109 	int ret;
3110 	int i;
3111 
3112 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3113 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3114 
3115 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3116 		u16 mode = 0;
3117 
3118 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3119 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3120 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3121 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3122 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3123 
3124 		req->rss_tc_mode[i] = cpu_to_le16(mode);
3125 	}
3126 
3127 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3128 	if (ret)
3129 		dev_err(&hdev->pdev->dev,
3130 			"Configure rss tc mode fail, status = %d\n", ret);
3131 
3132 	return ret;
3133 }
3134 
3135 static void hclge_get_rss_type(struct hclge_vport *vport)
3136 {
3137 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
3138 	    vport->rss_tuple_sets.ipv4_udp_en ||
3139 	    vport->rss_tuple_sets.ipv4_sctp_en ||
3140 	    vport->rss_tuple_sets.ipv6_tcp_en ||
3141 	    vport->rss_tuple_sets.ipv6_udp_en ||
3142 	    vport->rss_tuple_sets.ipv6_sctp_en)
3143 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3144 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3145 		 vport->rss_tuple_sets.ipv6_fragment_en)
3146 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3147 	else
3148 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3149 }
3150 
3151 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3152 {
3153 	struct hclge_rss_input_tuple_cmd *req;
3154 	struct hclge_desc desc;
3155 	int ret;
3156 
3157 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3158 
3159 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3160 
3161 	/* Get the tuple cfg from pf */
3162 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3163 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3164 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3165 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3166 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3167 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3168 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3169 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3170 	hclge_get_rss_type(&hdev->vport[0]);
3171 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3172 	if (ret)
3173 		dev_err(&hdev->pdev->dev,
3174 			"Configure rss input fail, status = %d\n", ret);
3175 	return ret;
3176 }
3177 
3178 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3179 			 u8 *key, u8 *hfunc)
3180 {
3181 	struct hclge_vport *vport = hclge_get_vport(handle);
3182 	int i;
3183 
3184 	/* Get hash algorithm */
3185 	if (hfunc) {
3186 		switch (vport->rss_algo) {
3187 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3188 			*hfunc = ETH_RSS_HASH_TOP;
3189 			break;
3190 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
3191 			*hfunc = ETH_RSS_HASH_XOR;
3192 			break;
3193 		default:
3194 			*hfunc = ETH_RSS_HASH_UNKNOWN;
3195 			break;
3196 		}
3197 	}
3198 
3199 	/* Get the RSS Key required by the user */
3200 	if (key)
3201 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3202 
3203 	/* Get indirect table */
3204 	if (indir)
3205 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3206 			indir[i] =  vport->rss_indirection_tbl[i];
3207 
3208 	return 0;
3209 }
3210 
3211 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3212 			 const  u8 *key, const  u8 hfunc)
3213 {
3214 	struct hclge_vport *vport = hclge_get_vport(handle);
3215 	struct hclge_dev *hdev = vport->back;
3216 	u8 hash_algo;
3217 	int ret, i;
3218 
3219 	/* Set the RSS Hash Key if specififed by the user */
3220 	if (key) {
3221 		switch (hfunc) {
3222 		case ETH_RSS_HASH_TOP:
3223 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3224 			break;
3225 		case ETH_RSS_HASH_XOR:
3226 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3227 			break;
3228 		case ETH_RSS_HASH_NO_CHANGE:
3229 			hash_algo = vport->rss_algo;
3230 			break;
3231 		default:
3232 			return -EINVAL;
3233 		}
3234 
3235 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3236 		if (ret)
3237 			return ret;
3238 
3239 		/* Update the shadow RSS key with user specified qids */
3240 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3241 		vport->rss_algo = hash_algo;
3242 	}
3243 
3244 	/* Update the shadow RSS table with user specified qids */
3245 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3246 		vport->rss_indirection_tbl[i] = indir[i];
3247 
3248 	/* Update the hardware */
3249 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3250 }
3251 
3252 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3253 {
3254 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3255 
3256 	if (nfc->data & RXH_L4_B_2_3)
3257 		hash_sets |= HCLGE_D_PORT_BIT;
3258 	else
3259 		hash_sets &= ~HCLGE_D_PORT_BIT;
3260 
3261 	if (nfc->data & RXH_IP_SRC)
3262 		hash_sets |= HCLGE_S_IP_BIT;
3263 	else
3264 		hash_sets &= ~HCLGE_S_IP_BIT;
3265 
3266 	if (nfc->data & RXH_IP_DST)
3267 		hash_sets |= HCLGE_D_IP_BIT;
3268 	else
3269 		hash_sets &= ~HCLGE_D_IP_BIT;
3270 
3271 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3272 		hash_sets |= HCLGE_V_TAG_BIT;
3273 
3274 	return hash_sets;
3275 }
3276 
3277 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3278 			       struct ethtool_rxnfc *nfc)
3279 {
3280 	struct hclge_vport *vport = hclge_get_vport(handle);
3281 	struct hclge_dev *hdev = vport->back;
3282 	struct hclge_rss_input_tuple_cmd *req;
3283 	struct hclge_desc desc;
3284 	u8 tuple_sets;
3285 	int ret;
3286 
3287 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3288 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
3289 		return -EINVAL;
3290 
3291 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3292 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3293 
3294 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3295 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3296 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3297 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3298 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3299 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3300 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3301 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3302 
3303 	tuple_sets = hclge_get_rss_hash_bits(nfc);
3304 	switch (nfc->flow_type) {
3305 	case TCP_V4_FLOW:
3306 		req->ipv4_tcp_en = tuple_sets;
3307 		break;
3308 	case TCP_V6_FLOW:
3309 		req->ipv6_tcp_en = tuple_sets;
3310 		break;
3311 	case UDP_V4_FLOW:
3312 		req->ipv4_udp_en = tuple_sets;
3313 		break;
3314 	case UDP_V6_FLOW:
3315 		req->ipv6_udp_en = tuple_sets;
3316 		break;
3317 	case SCTP_V4_FLOW:
3318 		req->ipv4_sctp_en = tuple_sets;
3319 		break;
3320 	case SCTP_V6_FLOW:
3321 		if ((nfc->data & RXH_L4_B_0_1) ||
3322 		    (nfc->data & RXH_L4_B_2_3))
3323 			return -EINVAL;
3324 
3325 		req->ipv6_sctp_en = tuple_sets;
3326 		break;
3327 	case IPV4_FLOW:
3328 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3329 		break;
3330 	case IPV6_FLOW:
3331 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3332 		break;
3333 	default:
3334 		return -EINVAL;
3335 	}
3336 
3337 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3338 	if (ret) {
3339 		dev_err(&hdev->pdev->dev,
3340 			"Set rss tuple fail, status = %d\n", ret);
3341 		return ret;
3342 	}
3343 
3344 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3345 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3346 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3347 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3348 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3349 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3350 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3351 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3352 	hclge_get_rss_type(vport);
3353 	return 0;
3354 }
3355 
3356 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3357 			       struct ethtool_rxnfc *nfc)
3358 {
3359 	struct hclge_vport *vport = hclge_get_vport(handle);
3360 	u8 tuple_sets;
3361 
3362 	nfc->data = 0;
3363 
3364 	switch (nfc->flow_type) {
3365 	case TCP_V4_FLOW:
3366 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3367 		break;
3368 	case UDP_V4_FLOW:
3369 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3370 		break;
3371 	case TCP_V6_FLOW:
3372 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3373 		break;
3374 	case UDP_V6_FLOW:
3375 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3376 		break;
3377 	case SCTP_V4_FLOW:
3378 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3379 		break;
3380 	case SCTP_V6_FLOW:
3381 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3382 		break;
3383 	case IPV4_FLOW:
3384 	case IPV6_FLOW:
3385 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3386 		break;
3387 	default:
3388 		return -EINVAL;
3389 	}
3390 
3391 	if (!tuple_sets)
3392 		return 0;
3393 
3394 	if (tuple_sets & HCLGE_D_PORT_BIT)
3395 		nfc->data |= RXH_L4_B_2_3;
3396 	if (tuple_sets & HCLGE_S_PORT_BIT)
3397 		nfc->data |= RXH_L4_B_0_1;
3398 	if (tuple_sets & HCLGE_D_IP_BIT)
3399 		nfc->data |= RXH_IP_DST;
3400 	if (tuple_sets & HCLGE_S_IP_BIT)
3401 		nfc->data |= RXH_IP_SRC;
3402 
3403 	return 0;
3404 }
3405 
3406 static int hclge_get_tc_size(struct hnae3_handle *handle)
3407 {
3408 	struct hclge_vport *vport = hclge_get_vport(handle);
3409 	struct hclge_dev *hdev = vport->back;
3410 
3411 	return hdev->rss_size_max;
3412 }
3413 
3414 int hclge_rss_init_hw(struct hclge_dev *hdev)
3415 {
3416 	struct hclge_vport *vport = hdev->vport;
3417 	u8 *rss_indir = vport[0].rss_indirection_tbl;
3418 	u16 rss_size = vport[0].alloc_rss_size;
3419 	u8 *key = vport[0].rss_hash_key;
3420 	u8 hfunc = vport[0].rss_algo;
3421 	u16 tc_offset[HCLGE_MAX_TC_NUM];
3422 	u16 tc_valid[HCLGE_MAX_TC_NUM];
3423 	u16 tc_size[HCLGE_MAX_TC_NUM];
3424 	u16 roundup_size;
3425 	int i, ret;
3426 
3427 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
3428 	if (ret)
3429 		return ret;
3430 
3431 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3432 	if (ret)
3433 		return ret;
3434 
3435 	ret = hclge_set_rss_input_tuple(hdev);
3436 	if (ret)
3437 		return ret;
3438 
3439 	/* Each TC have the same queue size, and tc_size set to hardware is
3440 	 * the log2 of roundup power of two of rss_size, the acutal queue
3441 	 * size is limited by indirection table.
3442 	 */
3443 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3444 		dev_err(&hdev->pdev->dev,
3445 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
3446 			rss_size);
3447 		return -EINVAL;
3448 	}
3449 
3450 	roundup_size = roundup_pow_of_two(rss_size);
3451 	roundup_size = ilog2(roundup_size);
3452 
3453 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3454 		tc_valid[i] = 0;
3455 
3456 		if (!(hdev->hw_tc_map & BIT(i)))
3457 			continue;
3458 
3459 		tc_valid[i] = 1;
3460 		tc_size[i] = roundup_size;
3461 		tc_offset[i] = rss_size * i;
3462 	}
3463 
3464 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3465 }
3466 
3467 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3468 {
3469 	struct hclge_vport *vport = hdev->vport;
3470 	int i, j;
3471 
3472 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3473 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3474 			vport[j].rss_indirection_tbl[i] =
3475 				i % vport[j].alloc_rss_size;
3476 	}
3477 }
3478 
3479 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3480 {
3481 	struct hclge_vport *vport = hdev->vport;
3482 	int i;
3483 
3484 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3485 		vport[i].rss_tuple_sets.ipv4_tcp_en =
3486 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3487 		vport[i].rss_tuple_sets.ipv4_udp_en =
3488 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3489 		vport[i].rss_tuple_sets.ipv4_sctp_en =
3490 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3491 		vport[i].rss_tuple_sets.ipv4_fragment_en =
3492 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3493 		vport[i].rss_tuple_sets.ipv6_tcp_en =
3494 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3495 		vport[i].rss_tuple_sets.ipv6_udp_en =
3496 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3497 		vport[i].rss_tuple_sets.ipv6_sctp_en =
3498 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3499 		vport[i].rss_tuple_sets.ipv6_fragment_en =
3500 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3501 
3502 		vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3503 
3504 		netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3505 	}
3506 
3507 	hclge_rss_indir_init_cfg(hdev);
3508 }
3509 
3510 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3511 				int vector_id, bool en,
3512 				struct hnae3_ring_chain_node *ring_chain)
3513 {
3514 	struct hclge_dev *hdev = vport->back;
3515 	struct hnae3_ring_chain_node *node;
3516 	struct hclge_desc desc;
3517 	struct hclge_ctrl_vector_chain_cmd *req
3518 		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3519 	enum hclge_cmd_status status;
3520 	enum hclge_opcode_type op;
3521 	u16 tqp_type_and_id;
3522 	int i;
3523 
3524 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3525 	hclge_cmd_setup_basic_desc(&desc, op, false);
3526 	req->int_vector_id = vector_id;
3527 
3528 	i = 0;
3529 	for (node = ring_chain; node; node = node->next) {
3530 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3531 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
3532 				HCLGE_INT_TYPE_S,
3533 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3534 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3535 				HCLGE_TQP_ID_S, node->tqp_index);
3536 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3537 				HCLGE_INT_GL_IDX_S,
3538 				hnae3_get_field(node->int_gl_idx,
3539 						HNAE3_RING_GL_IDX_M,
3540 						HNAE3_RING_GL_IDX_S));
3541 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3542 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3543 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3544 			req->vfid = vport->vport_id;
3545 
3546 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
3547 			if (status) {
3548 				dev_err(&hdev->pdev->dev,
3549 					"Map TQP fail, status is %d.\n",
3550 					status);
3551 				return -EIO;
3552 			}
3553 			i = 0;
3554 
3555 			hclge_cmd_setup_basic_desc(&desc,
3556 						   op,
3557 						   false);
3558 			req->int_vector_id = vector_id;
3559 		}
3560 	}
3561 
3562 	if (i > 0) {
3563 		req->int_cause_num = i;
3564 		req->vfid = vport->vport_id;
3565 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
3566 		if (status) {
3567 			dev_err(&hdev->pdev->dev,
3568 				"Map TQP fail, status is %d.\n", status);
3569 			return -EIO;
3570 		}
3571 	}
3572 
3573 	return 0;
3574 }
3575 
3576 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3577 				    int vector,
3578 				    struct hnae3_ring_chain_node *ring_chain)
3579 {
3580 	struct hclge_vport *vport = hclge_get_vport(handle);
3581 	struct hclge_dev *hdev = vport->back;
3582 	int vector_id;
3583 
3584 	vector_id = hclge_get_vector_index(hdev, vector);
3585 	if (vector_id < 0) {
3586 		dev_err(&hdev->pdev->dev,
3587 			"Get vector index fail. vector_id =%d\n", vector_id);
3588 		return vector_id;
3589 	}
3590 
3591 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3592 }
3593 
3594 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3595 				       int vector,
3596 				       struct hnae3_ring_chain_node *ring_chain)
3597 {
3598 	struct hclge_vport *vport = hclge_get_vport(handle);
3599 	struct hclge_dev *hdev = vport->back;
3600 	int vector_id, ret;
3601 
3602 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3603 		return 0;
3604 
3605 	vector_id = hclge_get_vector_index(hdev, vector);
3606 	if (vector_id < 0) {
3607 		dev_err(&handle->pdev->dev,
3608 			"Get vector index fail. ret =%d\n", vector_id);
3609 		return vector_id;
3610 	}
3611 
3612 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3613 	if (ret)
3614 		dev_err(&handle->pdev->dev,
3615 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3616 			vector_id,
3617 			ret);
3618 
3619 	return ret;
3620 }
3621 
3622 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3623 			       struct hclge_promisc_param *param)
3624 {
3625 	struct hclge_promisc_cfg_cmd *req;
3626 	struct hclge_desc desc;
3627 	int ret;
3628 
3629 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3630 
3631 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
3632 	req->vf_id = param->vf_id;
3633 
3634 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3635 	 * pdev revision(0x20), new revision support them. The
3636 	 * value of this two fields will not return error when driver
3637 	 * send command to fireware in revision(0x20).
3638 	 */
3639 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3640 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3641 
3642 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3643 	if (ret)
3644 		dev_err(&hdev->pdev->dev,
3645 			"Set promisc mode fail, status is %d.\n", ret);
3646 
3647 	return ret;
3648 }
3649 
3650 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3651 			      bool en_mc, bool en_bc, int vport_id)
3652 {
3653 	if (!param)
3654 		return;
3655 
3656 	memset(param, 0, sizeof(struct hclge_promisc_param));
3657 	if (en_uc)
3658 		param->enable = HCLGE_PROMISC_EN_UC;
3659 	if (en_mc)
3660 		param->enable |= HCLGE_PROMISC_EN_MC;
3661 	if (en_bc)
3662 		param->enable |= HCLGE_PROMISC_EN_BC;
3663 	param->vf_id = vport_id;
3664 }
3665 
3666 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3667 				  bool en_mc_pmc)
3668 {
3669 	struct hclge_vport *vport = hclge_get_vport(handle);
3670 	struct hclge_dev *hdev = vport->back;
3671 	struct hclge_promisc_param param;
3672 
3673 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
3674 				 vport->vport_id);
3675 	return hclge_cmd_set_promisc_mode(hdev, &param);
3676 }
3677 
3678 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3679 {
3680 	struct hclge_get_fd_mode_cmd *req;
3681 	struct hclge_desc desc;
3682 	int ret;
3683 
3684 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3685 
3686 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
3687 
3688 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3689 	if (ret) {
3690 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3691 		return ret;
3692 	}
3693 
3694 	*fd_mode = req->mode;
3695 
3696 	return ret;
3697 }
3698 
3699 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3700 				   u32 *stage1_entry_num,
3701 				   u32 *stage2_entry_num,
3702 				   u16 *stage1_counter_num,
3703 				   u16 *stage2_counter_num)
3704 {
3705 	struct hclge_get_fd_allocation_cmd *req;
3706 	struct hclge_desc desc;
3707 	int ret;
3708 
3709 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3710 
3711 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3712 
3713 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3714 	if (ret) {
3715 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3716 			ret);
3717 		return ret;
3718 	}
3719 
3720 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3721 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3722 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3723 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3724 
3725 	return ret;
3726 }
3727 
3728 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3729 {
3730 	struct hclge_set_fd_key_config_cmd *req;
3731 	struct hclge_fd_key_cfg *stage;
3732 	struct hclge_desc desc;
3733 	int ret;
3734 
3735 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3736 
3737 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3738 	stage = &hdev->fd_cfg.key_cfg[stage_num];
3739 	req->stage = stage_num;
3740 	req->key_select = stage->key_sel;
3741 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3742 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3743 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3744 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3745 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3746 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3747 
3748 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3749 	if (ret)
3750 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3751 
3752 	return ret;
3753 }
3754 
3755 static int hclge_init_fd_config(struct hclge_dev *hdev)
3756 {
3757 #define LOW_2_WORDS		0x03
3758 	struct hclge_fd_key_cfg *key_cfg;
3759 	int ret;
3760 
3761 	if (!hnae3_dev_fd_supported(hdev))
3762 		return 0;
3763 
3764 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3765 	if (ret)
3766 		return ret;
3767 
3768 	switch (hdev->fd_cfg.fd_mode) {
3769 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3770 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3771 		break;
3772 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3773 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3774 		break;
3775 	default:
3776 		dev_err(&hdev->pdev->dev,
3777 			"Unsupported flow director mode %d\n",
3778 			hdev->fd_cfg.fd_mode);
3779 		return -EOPNOTSUPP;
3780 	}
3781 
3782 	hdev->fd_cfg.fd_en = true;
3783 	hdev->fd_cfg.proto_support =
3784 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3785 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3786 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3787 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
3788 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3789 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3790 	key_cfg->outer_sipv6_word_en = 0;
3791 	key_cfg->outer_dipv6_word_en = 0;
3792 
3793 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3794 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3795 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3796 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3797 
3798 	/* If use max 400bit key, we can support tuples for ether type */
3799 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3800 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
3801 		key_cfg->tuple_active |=
3802 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3803 	}
3804 
3805 	/* roce_type is used to filter roce frames
3806 	 * dst_vport is used to specify the rule
3807 	 */
3808 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
3809 
3810 	ret = hclge_get_fd_allocation(hdev,
3811 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
3812 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
3813 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
3814 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
3815 	if (ret)
3816 		return ret;
3817 
3818 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
3819 }
3820 
3821 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
3822 				int loc, u8 *key, bool is_add)
3823 {
3824 	struct hclge_fd_tcam_config_1_cmd *req1;
3825 	struct hclge_fd_tcam_config_2_cmd *req2;
3826 	struct hclge_fd_tcam_config_3_cmd *req3;
3827 	struct hclge_desc desc[3];
3828 	int ret;
3829 
3830 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
3831 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3832 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
3833 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3834 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
3835 
3836 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
3837 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
3838 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
3839 
3840 	req1->stage = stage;
3841 	req1->xy_sel = sel_x ? 1 : 0;
3842 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
3843 	req1->index = cpu_to_le32(loc);
3844 	req1->entry_vld = sel_x ? is_add : 0;
3845 
3846 	if (key) {
3847 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
3848 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
3849 		       sizeof(req2->tcam_data));
3850 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
3851 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
3852 	}
3853 
3854 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
3855 	if (ret)
3856 		dev_err(&hdev->pdev->dev,
3857 			"config tcam key fail, ret=%d\n",
3858 			ret);
3859 
3860 	return ret;
3861 }
3862 
3863 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
3864 			      struct hclge_fd_ad_data *action)
3865 {
3866 	struct hclge_fd_ad_config_cmd *req;
3867 	struct hclge_desc desc;
3868 	u64 ad_data = 0;
3869 	int ret;
3870 
3871 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
3872 
3873 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
3874 	req->index = cpu_to_le32(loc);
3875 	req->stage = stage;
3876 
3877 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
3878 		      action->write_rule_id_to_bd);
3879 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
3880 			action->rule_id);
3881 	ad_data <<= 32;
3882 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
3883 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
3884 		      action->forward_to_direct_queue);
3885 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
3886 			action->queue_id);
3887 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
3888 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
3889 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
3890 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
3891 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
3892 			action->counter_id);
3893 
3894 	req->ad_data = cpu_to_le64(ad_data);
3895 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3896 	if (ret)
3897 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
3898 
3899 	return ret;
3900 }
3901 
3902 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
3903 				   struct hclge_fd_rule *rule)
3904 {
3905 	u16 tmp_x_s, tmp_y_s;
3906 	u32 tmp_x_l, tmp_y_l;
3907 	int i;
3908 
3909 	if (rule->unused_tuple & tuple_bit)
3910 		return true;
3911 
3912 	switch (tuple_bit) {
3913 	case 0:
3914 		return false;
3915 	case BIT(INNER_DST_MAC):
3916 		for (i = 0; i < 6; i++) {
3917 			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
3918 			       rule->tuples_mask.dst_mac[i]);
3919 			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
3920 			       rule->tuples_mask.dst_mac[i]);
3921 		}
3922 
3923 		return true;
3924 	case BIT(INNER_SRC_MAC):
3925 		for (i = 0; i < 6; i++) {
3926 			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
3927 			       rule->tuples.src_mac[i]);
3928 			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
3929 			       rule->tuples.src_mac[i]);
3930 		}
3931 
3932 		return true;
3933 	case BIT(INNER_VLAN_TAG_FST):
3934 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
3935 		       rule->tuples_mask.vlan_tag1);
3936 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
3937 		       rule->tuples_mask.vlan_tag1);
3938 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3939 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3940 
3941 		return true;
3942 	case BIT(INNER_ETH_TYPE):
3943 		calc_x(tmp_x_s, rule->tuples.ether_proto,
3944 		       rule->tuples_mask.ether_proto);
3945 		calc_y(tmp_y_s, rule->tuples.ether_proto,
3946 		       rule->tuples_mask.ether_proto);
3947 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3948 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3949 
3950 		return true;
3951 	case BIT(INNER_IP_TOS):
3952 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3953 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3954 
3955 		return true;
3956 	case BIT(INNER_IP_PROTO):
3957 		calc_x(*key_x, rule->tuples.ip_proto,
3958 		       rule->tuples_mask.ip_proto);
3959 		calc_y(*key_y, rule->tuples.ip_proto,
3960 		       rule->tuples_mask.ip_proto);
3961 
3962 		return true;
3963 	case BIT(INNER_SRC_IP):
3964 		calc_x(tmp_x_l, rule->tuples.src_ip[3],
3965 		       rule->tuples_mask.src_ip[3]);
3966 		calc_y(tmp_y_l, rule->tuples.src_ip[3],
3967 		       rule->tuples_mask.src_ip[3]);
3968 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3969 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3970 
3971 		return true;
3972 	case BIT(INNER_DST_IP):
3973 		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
3974 		       rule->tuples_mask.dst_ip[3]);
3975 		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
3976 		       rule->tuples_mask.dst_ip[3]);
3977 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3978 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3979 
3980 		return true;
3981 	case BIT(INNER_SRC_PORT):
3982 		calc_x(tmp_x_s, rule->tuples.src_port,
3983 		       rule->tuples_mask.src_port);
3984 		calc_y(tmp_y_s, rule->tuples.src_port,
3985 		       rule->tuples_mask.src_port);
3986 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3987 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3988 
3989 		return true;
3990 	case BIT(INNER_DST_PORT):
3991 		calc_x(tmp_x_s, rule->tuples.dst_port,
3992 		       rule->tuples_mask.dst_port);
3993 		calc_y(tmp_y_s, rule->tuples.dst_port,
3994 		       rule->tuples_mask.dst_port);
3995 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3996 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3997 
3998 		return true;
3999 	default:
4000 		return false;
4001 	}
4002 }
4003 
4004 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4005 				 u8 vf_id, u8 network_port_id)
4006 {
4007 	u32 port_number = 0;
4008 
4009 	if (port_type == HOST_PORT) {
4010 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4011 				pf_id);
4012 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4013 				vf_id);
4014 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4015 	} else {
4016 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4017 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4018 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4019 	}
4020 
4021 	return port_number;
4022 }
4023 
4024 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4025 				       __le32 *key_x, __le32 *key_y,
4026 				       struct hclge_fd_rule *rule)
4027 {
4028 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4029 	u8 cur_pos = 0, tuple_size, shift_bits;
4030 	int i;
4031 
4032 	for (i = 0; i < MAX_META_DATA; i++) {
4033 		tuple_size = meta_data_key_info[i].key_length;
4034 		tuple_bit = key_cfg->meta_data_active & BIT(i);
4035 
4036 		switch (tuple_bit) {
4037 		case BIT(ROCE_TYPE):
4038 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4039 			cur_pos += tuple_size;
4040 			break;
4041 		case BIT(DST_VPORT):
4042 			port_number = hclge_get_port_number(HOST_PORT, 0,
4043 							    rule->vf_id, 0);
4044 			hnae3_set_field(meta_data,
4045 					GENMASK(cur_pos + tuple_size, cur_pos),
4046 					cur_pos, port_number);
4047 			cur_pos += tuple_size;
4048 			break;
4049 		default:
4050 			break;
4051 		}
4052 	}
4053 
4054 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4055 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4056 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
4057 
4058 	*key_x = cpu_to_le32(tmp_x << shift_bits);
4059 	*key_y = cpu_to_le32(tmp_y << shift_bits);
4060 }
4061 
4062 /* A complete key is combined with meta data key and tuple key.
4063  * Meta data key is stored at the MSB region, and tuple key is stored at
4064  * the LSB region, unused bits will be filled 0.
4065  */
4066 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4067 			    struct hclge_fd_rule *rule)
4068 {
4069 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4070 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4071 	u8 *cur_key_x, *cur_key_y;
4072 	int i, ret, tuple_size;
4073 	u8 meta_data_region;
4074 
4075 	memset(key_x, 0, sizeof(key_x));
4076 	memset(key_y, 0, sizeof(key_y));
4077 	cur_key_x = key_x;
4078 	cur_key_y = key_y;
4079 
4080 	for (i = 0 ; i < MAX_TUPLE; i++) {
4081 		bool tuple_valid;
4082 		u32 check_tuple;
4083 
4084 		tuple_size = tuple_key_info[i].key_length / 8;
4085 		check_tuple = key_cfg->tuple_active & BIT(i);
4086 
4087 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4088 						     cur_key_y, rule);
4089 		if (tuple_valid) {
4090 			cur_key_x += tuple_size;
4091 			cur_key_y += tuple_size;
4092 		}
4093 	}
4094 
4095 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4096 			MAX_META_DATA_LENGTH / 8;
4097 
4098 	hclge_fd_convert_meta_data(key_cfg,
4099 				   (__le32 *)(key_x + meta_data_region),
4100 				   (__le32 *)(key_y + meta_data_region),
4101 				   rule);
4102 
4103 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4104 				   true);
4105 	if (ret) {
4106 		dev_err(&hdev->pdev->dev,
4107 			"fd key_y config fail, loc=%d, ret=%d\n",
4108 			rule->queue_id, ret);
4109 		return ret;
4110 	}
4111 
4112 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4113 				   true);
4114 	if (ret)
4115 		dev_err(&hdev->pdev->dev,
4116 			"fd key_x config fail, loc=%d, ret=%d\n",
4117 			rule->queue_id, ret);
4118 	return ret;
4119 }
4120 
4121 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4122 			       struct hclge_fd_rule *rule)
4123 {
4124 	struct hclge_fd_ad_data ad_data;
4125 
4126 	ad_data.ad_id = rule->location;
4127 
4128 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4129 		ad_data.drop_packet = true;
4130 		ad_data.forward_to_direct_queue = false;
4131 		ad_data.queue_id = 0;
4132 	} else {
4133 		ad_data.drop_packet = false;
4134 		ad_data.forward_to_direct_queue = true;
4135 		ad_data.queue_id = rule->queue_id;
4136 	}
4137 
4138 	ad_data.use_counter = false;
4139 	ad_data.counter_id = 0;
4140 
4141 	ad_data.use_next_stage = false;
4142 	ad_data.next_input_key = 0;
4143 
4144 	ad_data.write_rule_id_to_bd = true;
4145 	ad_data.rule_id = rule->location;
4146 
4147 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4148 }
4149 
4150 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4151 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
4152 {
4153 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
4154 	struct ethtool_usrip4_spec *usr_ip4_spec;
4155 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
4156 	struct ethtool_usrip6_spec *usr_ip6_spec;
4157 	struct ethhdr *ether_spec;
4158 
4159 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4160 		return -EINVAL;
4161 
4162 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4163 		return -EOPNOTSUPP;
4164 
4165 	if ((fs->flow_type & FLOW_EXT) &&
4166 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4167 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4168 		return -EOPNOTSUPP;
4169 	}
4170 
4171 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4172 	case SCTP_V4_FLOW:
4173 	case TCP_V4_FLOW:
4174 	case UDP_V4_FLOW:
4175 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4176 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4177 
4178 		if (!tcp_ip4_spec->ip4src)
4179 			*unused |= BIT(INNER_SRC_IP);
4180 
4181 		if (!tcp_ip4_spec->ip4dst)
4182 			*unused |= BIT(INNER_DST_IP);
4183 
4184 		if (!tcp_ip4_spec->psrc)
4185 			*unused |= BIT(INNER_SRC_PORT);
4186 
4187 		if (!tcp_ip4_spec->pdst)
4188 			*unused |= BIT(INNER_DST_PORT);
4189 
4190 		if (!tcp_ip4_spec->tos)
4191 			*unused |= BIT(INNER_IP_TOS);
4192 
4193 		break;
4194 	case IP_USER_FLOW:
4195 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4196 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4197 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4198 
4199 		if (!usr_ip4_spec->ip4src)
4200 			*unused |= BIT(INNER_SRC_IP);
4201 
4202 		if (!usr_ip4_spec->ip4dst)
4203 			*unused |= BIT(INNER_DST_IP);
4204 
4205 		if (!usr_ip4_spec->tos)
4206 			*unused |= BIT(INNER_IP_TOS);
4207 
4208 		if (!usr_ip4_spec->proto)
4209 			*unused |= BIT(INNER_IP_PROTO);
4210 
4211 		if (usr_ip4_spec->l4_4_bytes)
4212 			return -EOPNOTSUPP;
4213 
4214 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4215 			return -EOPNOTSUPP;
4216 
4217 		break;
4218 	case SCTP_V6_FLOW:
4219 	case TCP_V6_FLOW:
4220 	case UDP_V6_FLOW:
4221 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4222 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4223 			BIT(INNER_IP_TOS);
4224 
4225 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4226 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4227 			*unused |= BIT(INNER_SRC_IP);
4228 
4229 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4230 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4231 			*unused |= BIT(INNER_DST_IP);
4232 
4233 		if (!tcp_ip6_spec->psrc)
4234 			*unused |= BIT(INNER_SRC_PORT);
4235 
4236 		if (!tcp_ip6_spec->pdst)
4237 			*unused |= BIT(INNER_DST_PORT);
4238 
4239 		if (tcp_ip6_spec->tclass)
4240 			return -EOPNOTSUPP;
4241 
4242 		break;
4243 	case IPV6_USER_FLOW:
4244 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4245 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4246 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4247 			BIT(INNER_DST_PORT);
4248 
4249 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4250 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4251 			*unused |= BIT(INNER_SRC_IP);
4252 
4253 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4254 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4255 			*unused |= BIT(INNER_DST_IP);
4256 
4257 		if (!usr_ip6_spec->l4_proto)
4258 			*unused |= BIT(INNER_IP_PROTO);
4259 
4260 		if (usr_ip6_spec->tclass)
4261 			return -EOPNOTSUPP;
4262 
4263 		if (usr_ip6_spec->l4_4_bytes)
4264 			return -EOPNOTSUPP;
4265 
4266 		break;
4267 	case ETHER_FLOW:
4268 		ether_spec = &fs->h_u.ether_spec;
4269 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4270 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4271 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4272 
4273 		if (is_zero_ether_addr(ether_spec->h_source))
4274 			*unused |= BIT(INNER_SRC_MAC);
4275 
4276 		if (is_zero_ether_addr(ether_spec->h_dest))
4277 			*unused |= BIT(INNER_DST_MAC);
4278 
4279 		if (!ether_spec->h_proto)
4280 			*unused |= BIT(INNER_ETH_TYPE);
4281 
4282 		break;
4283 	default:
4284 		return -EOPNOTSUPP;
4285 	}
4286 
4287 	if ((fs->flow_type & FLOW_EXT)) {
4288 		if (fs->h_ext.vlan_etype)
4289 			return -EOPNOTSUPP;
4290 		if (!fs->h_ext.vlan_tci)
4291 			*unused |= BIT(INNER_VLAN_TAG_FST);
4292 
4293 		if (fs->m_ext.vlan_tci) {
4294 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4295 				return -EINVAL;
4296 		}
4297 	} else {
4298 		*unused |= BIT(INNER_VLAN_TAG_FST);
4299 	}
4300 
4301 	if (fs->flow_type & FLOW_MAC_EXT) {
4302 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4303 			return -EOPNOTSUPP;
4304 
4305 		if (is_zero_ether_addr(fs->h_ext.h_dest))
4306 			*unused |= BIT(INNER_DST_MAC);
4307 		else
4308 			*unused &= ~(BIT(INNER_DST_MAC));
4309 	}
4310 
4311 	return 0;
4312 }
4313 
4314 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4315 {
4316 	struct hclge_fd_rule *rule = NULL;
4317 	struct hlist_node *node2;
4318 
4319 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4320 		if (rule->location >= location)
4321 			break;
4322 	}
4323 
4324 	return  rule && rule->location == location;
4325 }
4326 
4327 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4328 				     struct hclge_fd_rule *new_rule,
4329 				     u16 location,
4330 				     bool is_add)
4331 {
4332 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
4333 	struct hlist_node *node2;
4334 
4335 	if (is_add && !new_rule)
4336 		return -EINVAL;
4337 
4338 	hlist_for_each_entry_safe(rule, node2,
4339 				  &hdev->fd_rule_list, rule_node) {
4340 		if (rule->location >= location)
4341 			break;
4342 		parent = rule;
4343 	}
4344 
4345 	if (rule && rule->location == location) {
4346 		hlist_del(&rule->rule_node);
4347 		kfree(rule);
4348 		hdev->hclge_fd_rule_num--;
4349 
4350 		if (!is_add)
4351 			return 0;
4352 
4353 	} else if (!is_add) {
4354 		dev_err(&hdev->pdev->dev,
4355 			"delete fail, rule %d is inexistent\n",
4356 			location);
4357 		return -EINVAL;
4358 	}
4359 
4360 	INIT_HLIST_NODE(&new_rule->rule_node);
4361 
4362 	if (parent)
4363 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4364 	else
4365 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4366 
4367 	hdev->hclge_fd_rule_num++;
4368 
4369 	return 0;
4370 }
4371 
4372 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4373 			      struct ethtool_rx_flow_spec *fs,
4374 			      struct hclge_fd_rule *rule)
4375 {
4376 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4377 
4378 	switch (flow_type) {
4379 	case SCTP_V4_FLOW:
4380 	case TCP_V4_FLOW:
4381 	case UDP_V4_FLOW:
4382 		rule->tuples.src_ip[3] =
4383 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4384 		rule->tuples_mask.src_ip[3] =
4385 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4386 
4387 		rule->tuples.dst_ip[3] =
4388 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4389 		rule->tuples_mask.dst_ip[3] =
4390 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4391 
4392 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4393 		rule->tuples_mask.src_port =
4394 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4395 
4396 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4397 		rule->tuples_mask.dst_port =
4398 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4399 
4400 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4401 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4402 
4403 		rule->tuples.ether_proto = ETH_P_IP;
4404 		rule->tuples_mask.ether_proto = 0xFFFF;
4405 
4406 		break;
4407 	case IP_USER_FLOW:
4408 		rule->tuples.src_ip[3] =
4409 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4410 		rule->tuples_mask.src_ip[3] =
4411 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4412 
4413 		rule->tuples.dst_ip[3] =
4414 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4415 		rule->tuples_mask.dst_ip[3] =
4416 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4417 
4418 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4419 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4420 
4421 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4422 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4423 
4424 		rule->tuples.ether_proto = ETH_P_IP;
4425 		rule->tuples_mask.ether_proto = 0xFFFF;
4426 
4427 		break;
4428 	case SCTP_V6_FLOW:
4429 	case TCP_V6_FLOW:
4430 	case UDP_V6_FLOW:
4431 		be32_to_cpu_array(rule->tuples.src_ip,
4432 				  fs->h_u.tcp_ip6_spec.ip6src, 4);
4433 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4434 				  fs->m_u.tcp_ip6_spec.ip6src, 4);
4435 
4436 		be32_to_cpu_array(rule->tuples.dst_ip,
4437 				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
4438 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4439 				  fs->m_u.tcp_ip6_spec.ip6dst, 4);
4440 
4441 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4442 		rule->tuples_mask.src_port =
4443 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4444 
4445 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4446 		rule->tuples_mask.dst_port =
4447 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4448 
4449 		rule->tuples.ether_proto = ETH_P_IPV6;
4450 		rule->tuples_mask.ether_proto = 0xFFFF;
4451 
4452 		break;
4453 	case IPV6_USER_FLOW:
4454 		be32_to_cpu_array(rule->tuples.src_ip,
4455 				  fs->h_u.usr_ip6_spec.ip6src, 4);
4456 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4457 				  fs->m_u.usr_ip6_spec.ip6src, 4);
4458 
4459 		be32_to_cpu_array(rule->tuples.dst_ip,
4460 				  fs->h_u.usr_ip6_spec.ip6dst, 4);
4461 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4462 				  fs->m_u.usr_ip6_spec.ip6dst, 4);
4463 
4464 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4465 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4466 
4467 		rule->tuples.ether_proto = ETH_P_IPV6;
4468 		rule->tuples_mask.ether_proto = 0xFFFF;
4469 
4470 		break;
4471 	case ETHER_FLOW:
4472 		ether_addr_copy(rule->tuples.src_mac,
4473 				fs->h_u.ether_spec.h_source);
4474 		ether_addr_copy(rule->tuples_mask.src_mac,
4475 				fs->m_u.ether_spec.h_source);
4476 
4477 		ether_addr_copy(rule->tuples.dst_mac,
4478 				fs->h_u.ether_spec.h_dest);
4479 		ether_addr_copy(rule->tuples_mask.dst_mac,
4480 				fs->m_u.ether_spec.h_dest);
4481 
4482 		rule->tuples.ether_proto =
4483 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
4484 		rule->tuples_mask.ether_proto =
4485 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
4486 
4487 		break;
4488 	default:
4489 		return -EOPNOTSUPP;
4490 	}
4491 
4492 	switch (flow_type) {
4493 	case SCTP_V4_FLOW:
4494 	case SCTP_V6_FLOW:
4495 		rule->tuples.ip_proto = IPPROTO_SCTP;
4496 		rule->tuples_mask.ip_proto = 0xFF;
4497 		break;
4498 	case TCP_V4_FLOW:
4499 	case TCP_V6_FLOW:
4500 		rule->tuples.ip_proto = IPPROTO_TCP;
4501 		rule->tuples_mask.ip_proto = 0xFF;
4502 		break;
4503 	case UDP_V4_FLOW:
4504 	case UDP_V6_FLOW:
4505 		rule->tuples.ip_proto = IPPROTO_UDP;
4506 		rule->tuples_mask.ip_proto = 0xFF;
4507 		break;
4508 	default:
4509 		break;
4510 	}
4511 
4512 	if ((fs->flow_type & FLOW_EXT)) {
4513 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4514 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4515 	}
4516 
4517 	if (fs->flow_type & FLOW_MAC_EXT) {
4518 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4519 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4520 	}
4521 
4522 	return 0;
4523 }
4524 
4525 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4526 			      struct ethtool_rxnfc *cmd)
4527 {
4528 	struct hclge_vport *vport = hclge_get_vport(handle);
4529 	struct hclge_dev *hdev = vport->back;
4530 	u16 dst_vport_id = 0, q_index = 0;
4531 	struct ethtool_rx_flow_spec *fs;
4532 	struct hclge_fd_rule *rule;
4533 	u32 unused = 0;
4534 	u8 action;
4535 	int ret;
4536 
4537 	if (!hnae3_dev_fd_supported(hdev))
4538 		return -EOPNOTSUPP;
4539 
4540 	if (!hdev->fd_cfg.fd_en) {
4541 		dev_warn(&hdev->pdev->dev,
4542 			 "Please enable flow director first\n");
4543 		return -EOPNOTSUPP;
4544 	}
4545 
4546 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4547 
4548 	ret = hclge_fd_check_spec(hdev, fs, &unused);
4549 	if (ret) {
4550 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4551 		return ret;
4552 	}
4553 
4554 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4555 		action = HCLGE_FD_ACTION_DROP_PACKET;
4556 	} else {
4557 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4558 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4559 		u16 tqps;
4560 
4561 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4562 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4563 
4564 		if (ring >= tqps) {
4565 			dev_err(&hdev->pdev->dev,
4566 				"Error: queue id (%d) > max tqp num (%d)\n",
4567 				ring, tqps - 1);
4568 			return -EINVAL;
4569 		}
4570 
4571 		if (vf > hdev->num_req_vfs) {
4572 			dev_err(&hdev->pdev->dev,
4573 				"Error: vf id (%d) > max vf num (%d)\n",
4574 				vf, hdev->num_req_vfs);
4575 			return -EINVAL;
4576 		}
4577 
4578 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4579 		q_index = ring;
4580 	}
4581 
4582 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4583 	if (!rule)
4584 		return -ENOMEM;
4585 
4586 	ret = hclge_fd_get_tuple(hdev, fs, rule);
4587 	if (ret)
4588 		goto free_rule;
4589 
4590 	rule->flow_type = fs->flow_type;
4591 
4592 	rule->location = fs->location;
4593 	rule->unused_tuple = unused;
4594 	rule->vf_id = dst_vport_id;
4595 	rule->queue_id = q_index;
4596 	rule->action = action;
4597 
4598 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4599 	if (ret)
4600 		goto free_rule;
4601 
4602 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4603 	if (ret)
4604 		goto free_rule;
4605 
4606 	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4607 	if (ret)
4608 		goto free_rule;
4609 
4610 	return ret;
4611 
4612 free_rule:
4613 	kfree(rule);
4614 	return ret;
4615 }
4616 
4617 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4618 			      struct ethtool_rxnfc *cmd)
4619 {
4620 	struct hclge_vport *vport = hclge_get_vport(handle);
4621 	struct hclge_dev *hdev = vport->back;
4622 	struct ethtool_rx_flow_spec *fs;
4623 	int ret;
4624 
4625 	if (!hnae3_dev_fd_supported(hdev))
4626 		return -EOPNOTSUPP;
4627 
4628 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4629 
4630 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4631 		return -EINVAL;
4632 
4633 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
4634 		dev_err(&hdev->pdev->dev,
4635 			"Delete fail, rule %d is inexistent\n",
4636 			fs->location);
4637 		return -ENOENT;
4638 	}
4639 
4640 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4641 				   fs->location, NULL, false);
4642 	if (ret)
4643 		return ret;
4644 
4645 	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4646 					 false);
4647 }
4648 
4649 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4650 				     bool clear_list)
4651 {
4652 	struct hclge_vport *vport = hclge_get_vport(handle);
4653 	struct hclge_dev *hdev = vport->back;
4654 	struct hclge_fd_rule *rule;
4655 	struct hlist_node *node;
4656 
4657 	if (!hnae3_dev_fd_supported(hdev))
4658 		return;
4659 
4660 	if (clear_list) {
4661 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4662 					  rule_node) {
4663 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4664 					     rule->location, NULL, false);
4665 			hlist_del(&rule->rule_node);
4666 			kfree(rule);
4667 			hdev->hclge_fd_rule_num--;
4668 		}
4669 	} else {
4670 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4671 					  rule_node)
4672 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4673 					     rule->location, NULL, false);
4674 	}
4675 }
4676 
4677 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4678 {
4679 	struct hclge_vport *vport = hclge_get_vport(handle);
4680 	struct hclge_dev *hdev = vport->back;
4681 	struct hclge_fd_rule *rule;
4682 	struct hlist_node *node;
4683 	int ret;
4684 
4685 	/* Return ok here, because reset error handling will check this
4686 	 * return value. If error is returned here, the reset process will
4687 	 * fail.
4688 	 */
4689 	if (!hnae3_dev_fd_supported(hdev))
4690 		return 0;
4691 
4692 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4693 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4694 		if (!ret)
4695 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4696 
4697 		if (ret) {
4698 			dev_warn(&hdev->pdev->dev,
4699 				 "Restore rule %d failed, remove it\n",
4700 				 rule->location);
4701 			hlist_del(&rule->rule_node);
4702 			kfree(rule);
4703 			hdev->hclge_fd_rule_num--;
4704 		}
4705 	}
4706 	return 0;
4707 }
4708 
4709 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4710 				 struct ethtool_rxnfc *cmd)
4711 {
4712 	struct hclge_vport *vport = hclge_get_vport(handle);
4713 	struct hclge_dev *hdev = vport->back;
4714 
4715 	if (!hnae3_dev_fd_supported(hdev))
4716 		return -EOPNOTSUPP;
4717 
4718 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
4719 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4720 
4721 	return 0;
4722 }
4723 
4724 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4725 				  struct ethtool_rxnfc *cmd)
4726 {
4727 	struct hclge_vport *vport = hclge_get_vport(handle);
4728 	struct hclge_fd_rule *rule = NULL;
4729 	struct hclge_dev *hdev = vport->back;
4730 	struct ethtool_rx_flow_spec *fs;
4731 	struct hlist_node *node2;
4732 
4733 	if (!hnae3_dev_fd_supported(hdev))
4734 		return -EOPNOTSUPP;
4735 
4736 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4737 
4738 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4739 		if (rule->location >= fs->location)
4740 			break;
4741 	}
4742 
4743 	if (!rule || fs->location != rule->location)
4744 		return -ENOENT;
4745 
4746 	fs->flow_type = rule->flow_type;
4747 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4748 	case SCTP_V4_FLOW:
4749 	case TCP_V4_FLOW:
4750 	case UDP_V4_FLOW:
4751 		fs->h_u.tcp_ip4_spec.ip4src =
4752 				cpu_to_be32(rule->tuples.src_ip[3]);
4753 		fs->m_u.tcp_ip4_spec.ip4src =
4754 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
4755 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4756 
4757 		fs->h_u.tcp_ip4_spec.ip4dst =
4758 				cpu_to_be32(rule->tuples.dst_ip[3]);
4759 		fs->m_u.tcp_ip4_spec.ip4dst =
4760 				rule->unused_tuple & BIT(INNER_DST_IP) ?
4761 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4762 
4763 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4764 		fs->m_u.tcp_ip4_spec.psrc =
4765 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4766 				0 : cpu_to_be16(rule->tuples_mask.src_port);
4767 
4768 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4769 		fs->m_u.tcp_ip4_spec.pdst =
4770 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
4771 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
4772 
4773 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4774 		fs->m_u.tcp_ip4_spec.tos =
4775 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
4776 				0 : rule->tuples_mask.ip_tos;
4777 
4778 		break;
4779 	case IP_USER_FLOW:
4780 		fs->h_u.usr_ip4_spec.ip4src =
4781 				cpu_to_be32(rule->tuples.src_ip[3]);
4782 		fs->m_u.tcp_ip4_spec.ip4src =
4783 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
4784 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4785 
4786 		fs->h_u.usr_ip4_spec.ip4dst =
4787 				cpu_to_be32(rule->tuples.dst_ip[3]);
4788 		fs->m_u.usr_ip4_spec.ip4dst =
4789 				rule->unused_tuple & BIT(INNER_DST_IP) ?
4790 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4791 
4792 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4793 		fs->m_u.usr_ip4_spec.tos =
4794 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
4795 				0 : rule->tuples_mask.ip_tos;
4796 
4797 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4798 		fs->m_u.usr_ip4_spec.proto =
4799 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4800 				0 : rule->tuples_mask.ip_proto;
4801 
4802 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
4803 
4804 		break;
4805 	case SCTP_V6_FLOW:
4806 	case TCP_V6_FLOW:
4807 	case UDP_V6_FLOW:
4808 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
4809 				  rule->tuples.src_ip, 4);
4810 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
4811 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
4812 		else
4813 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
4814 					  rule->tuples_mask.src_ip, 4);
4815 
4816 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
4817 				  rule->tuples.dst_ip, 4);
4818 		if (rule->unused_tuple & BIT(INNER_DST_IP))
4819 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4820 		else
4821 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
4822 					  rule->tuples_mask.dst_ip, 4);
4823 
4824 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4825 		fs->m_u.tcp_ip6_spec.psrc =
4826 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4827 				0 : cpu_to_be16(rule->tuples_mask.src_port);
4828 
4829 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4830 		fs->m_u.tcp_ip6_spec.pdst =
4831 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
4832 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
4833 
4834 		break;
4835 	case IPV6_USER_FLOW:
4836 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
4837 				  rule->tuples.src_ip, 4);
4838 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
4839 			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
4840 		else
4841 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
4842 					  rule->tuples_mask.src_ip, 4);
4843 
4844 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
4845 				  rule->tuples.dst_ip, 4);
4846 		if (rule->unused_tuple & BIT(INNER_DST_IP))
4847 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4848 		else
4849 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
4850 					  rule->tuples_mask.dst_ip, 4);
4851 
4852 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
4853 		fs->m_u.usr_ip6_spec.l4_proto =
4854 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4855 				0 : rule->tuples_mask.ip_proto;
4856 
4857 		break;
4858 	case ETHER_FLOW:
4859 		ether_addr_copy(fs->h_u.ether_spec.h_source,
4860 				rule->tuples.src_mac);
4861 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
4862 			eth_zero_addr(fs->m_u.ether_spec.h_source);
4863 		else
4864 			ether_addr_copy(fs->m_u.ether_spec.h_source,
4865 					rule->tuples_mask.src_mac);
4866 
4867 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
4868 				rule->tuples.dst_mac);
4869 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
4870 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
4871 		else
4872 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
4873 					rule->tuples_mask.dst_mac);
4874 
4875 		fs->h_u.ether_spec.h_proto =
4876 				cpu_to_be16(rule->tuples.ether_proto);
4877 		fs->m_u.ether_spec.h_proto =
4878 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
4879 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
4880 
4881 		break;
4882 	default:
4883 		return -EOPNOTSUPP;
4884 	}
4885 
4886 	if (fs->flow_type & FLOW_EXT) {
4887 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
4888 		fs->m_ext.vlan_tci =
4889 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
4890 				cpu_to_be16(VLAN_VID_MASK) :
4891 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
4892 	}
4893 
4894 	if (fs->flow_type & FLOW_MAC_EXT) {
4895 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
4896 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
4897 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
4898 		else
4899 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
4900 					rule->tuples_mask.dst_mac);
4901 	}
4902 
4903 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4904 		fs->ring_cookie = RX_CLS_FLOW_DISC;
4905 	} else {
4906 		u64 vf_id;
4907 
4908 		fs->ring_cookie = rule->queue_id;
4909 		vf_id = rule->vf_id;
4910 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
4911 		fs->ring_cookie |= vf_id;
4912 	}
4913 
4914 	return 0;
4915 }
4916 
4917 static int hclge_get_all_rules(struct hnae3_handle *handle,
4918 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
4919 {
4920 	struct hclge_vport *vport = hclge_get_vport(handle);
4921 	struct hclge_dev *hdev = vport->back;
4922 	struct hclge_fd_rule *rule;
4923 	struct hlist_node *node2;
4924 	int cnt = 0;
4925 
4926 	if (!hnae3_dev_fd_supported(hdev))
4927 		return -EOPNOTSUPP;
4928 
4929 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4930 
4931 	hlist_for_each_entry_safe(rule, node2,
4932 				  &hdev->fd_rule_list, rule_node) {
4933 		if (cnt == cmd->rule_cnt)
4934 			return -EMSGSIZE;
4935 
4936 		rule_locs[cnt] = rule->location;
4937 		cnt++;
4938 	}
4939 
4940 	cmd->rule_cnt = cnt;
4941 
4942 	return 0;
4943 }
4944 
4945 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
4946 {
4947 	struct hclge_vport *vport = hclge_get_vport(handle);
4948 	struct hclge_dev *hdev = vport->back;
4949 
4950 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
4951 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
4952 }
4953 
4954 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
4955 {
4956 	struct hclge_vport *vport = hclge_get_vport(handle);
4957 	struct hclge_dev *hdev = vport->back;
4958 
4959 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4960 }
4961 
4962 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
4963 {
4964 	struct hclge_vport *vport = hclge_get_vport(handle);
4965 	struct hclge_dev *hdev = vport->back;
4966 
4967 	return hdev->reset_count;
4968 }
4969 
4970 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
4971 {
4972 	struct hclge_vport *vport = hclge_get_vport(handle);
4973 	struct hclge_dev *hdev = vport->back;
4974 
4975 	hdev->fd_cfg.fd_en = enable;
4976 	if (!enable)
4977 		hclge_del_all_fd_entries(handle, false);
4978 	else
4979 		hclge_restore_fd_entries(handle);
4980 }
4981 
4982 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
4983 {
4984 	struct hclge_desc desc;
4985 	struct hclge_config_mac_mode_cmd *req =
4986 		(struct hclge_config_mac_mode_cmd *)desc.data;
4987 	u32 loop_en = 0;
4988 	int ret;
4989 
4990 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
4991 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
4992 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
4993 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
4994 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
4995 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
4996 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
4997 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
4998 	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
4999 	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5000 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5001 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5002 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5003 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5004 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5005 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5006 
5007 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5008 	if (ret)
5009 		dev_err(&hdev->pdev->dev,
5010 			"mac enable fail, ret =%d.\n", ret);
5011 }
5012 
5013 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5014 {
5015 	struct hclge_config_mac_mode_cmd *req;
5016 	struct hclge_desc desc;
5017 	u32 loop_en;
5018 	int ret;
5019 
5020 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5021 	/* 1 Read out the MAC mode config at first */
5022 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5023 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5024 	if (ret) {
5025 		dev_err(&hdev->pdev->dev,
5026 			"mac loopback get fail, ret =%d.\n", ret);
5027 		return ret;
5028 	}
5029 
5030 	/* 2 Then setup the loopback flag */
5031 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5032 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5033 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5034 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5035 
5036 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5037 
5038 	/* 3 Config mac work mode with loopback flag
5039 	 * and its original configure parameters
5040 	 */
5041 	hclge_cmd_reuse_desc(&desc, false);
5042 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5043 	if (ret)
5044 		dev_err(&hdev->pdev->dev,
5045 			"mac loopback set fail, ret =%d.\n", ret);
5046 	return ret;
5047 }
5048 
5049 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5050 				     enum hnae3_loop loop_mode)
5051 {
5052 #define HCLGE_SERDES_RETRY_MS	10
5053 #define HCLGE_SERDES_RETRY_NUM	100
5054 	struct hclge_serdes_lb_cmd *req;
5055 	struct hclge_desc desc;
5056 	int ret, i = 0;
5057 	u8 loop_mode_b;
5058 
5059 	req = (struct hclge_serdes_lb_cmd *)desc.data;
5060 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5061 
5062 	switch (loop_mode) {
5063 	case HNAE3_LOOP_SERIAL_SERDES:
5064 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5065 		break;
5066 	case HNAE3_LOOP_PARALLEL_SERDES:
5067 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5068 		break;
5069 	default:
5070 		dev_err(&hdev->pdev->dev,
5071 			"unsupported serdes loopback mode %d\n", loop_mode);
5072 		return -ENOTSUPP;
5073 	}
5074 
5075 	if (en) {
5076 		req->enable = loop_mode_b;
5077 		req->mask = loop_mode_b;
5078 	} else {
5079 		req->mask = loop_mode_b;
5080 	}
5081 
5082 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5083 	if (ret) {
5084 		dev_err(&hdev->pdev->dev,
5085 			"serdes loopback set fail, ret = %d\n", ret);
5086 		return ret;
5087 	}
5088 
5089 	do {
5090 		msleep(HCLGE_SERDES_RETRY_MS);
5091 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5092 					   true);
5093 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5094 		if (ret) {
5095 			dev_err(&hdev->pdev->dev,
5096 				"serdes loopback get, ret = %d\n", ret);
5097 			return ret;
5098 		}
5099 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
5100 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5101 
5102 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5103 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5104 		return -EBUSY;
5105 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5106 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5107 		return -EIO;
5108 	}
5109 
5110 	hclge_cfg_mac_mode(hdev, en);
5111 	return 0;
5112 }
5113 
5114 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5115 			    int stream_id, bool enable)
5116 {
5117 	struct hclge_desc desc;
5118 	struct hclge_cfg_com_tqp_queue_cmd *req =
5119 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5120 	int ret;
5121 
5122 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5123 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5124 	req->stream_id = cpu_to_le16(stream_id);
5125 	req->enable |= enable << HCLGE_TQP_ENABLE_B;
5126 
5127 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5128 	if (ret)
5129 		dev_err(&hdev->pdev->dev,
5130 			"Tqp enable fail, status =%d.\n", ret);
5131 	return ret;
5132 }
5133 
5134 static int hclge_set_loopback(struct hnae3_handle *handle,
5135 			      enum hnae3_loop loop_mode, bool en)
5136 {
5137 	struct hclge_vport *vport = hclge_get_vport(handle);
5138 	struct hclge_dev *hdev = vport->back;
5139 	int i, ret;
5140 
5141 	switch (loop_mode) {
5142 	case HNAE3_LOOP_APP:
5143 		ret = hclge_set_app_loopback(hdev, en);
5144 		break;
5145 	case HNAE3_LOOP_SERIAL_SERDES:
5146 	case HNAE3_LOOP_PARALLEL_SERDES:
5147 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5148 		break;
5149 	default:
5150 		ret = -ENOTSUPP;
5151 		dev_err(&hdev->pdev->dev,
5152 			"loop_mode %d is not supported\n", loop_mode);
5153 		break;
5154 	}
5155 
5156 	for (i = 0; i < vport->alloc_tqps; i++) {
5157 		ret = hclge_tqp_enable(hdev, i, 0, en);
5158 		if (ret)
5159 			return ret;
5160 	}
5161 
5162 	return 0;
5163 }
5164 
5165 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5166 {
5167 	struct hclge_vport *vport = hclge_get_vport(handle);
5168 	struct hnae3_queue *queue;
5169 	struct hclge_tqp *tqp;
5170 	int i;
5171 
5172 	for (i = 0; i < vport->alloc_tqps; i++) {
5173 		queue = handle->kinfo.tqp[i];
5174 		tqp = container_of(queue, struct hclge_tqp, q);
5175 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5176 	}
5177 }
5178 
5179 static int hclge_ae_start(struct hnae3_handle *handle)
5180 {
5181 	struct hclge_vport *vport = hclge_get_vport(handle);
5182 	struct hclge_dev *hdev = vport->back;
5183 
5184 	/* mac enable */
5185 	hclge_cfg_mac_mode(hdev, true);
5186 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5187 	mod_timer(&hdev->service_timer, jiffies + HZ);
5188 	hdev->hw.mac.link = 0;
5189 
5190 	/* reset tqp stats */
5191 	hclge_reset_tqp_stats(handle);
5192 
5193 	hclge_mac_start_phy(hdev);
5194 
5195 	return 0;
5196 }
5197 
5198 static void hclge_ae_stop(struct hnae3_handle *handle)
5199 {
5200 	struct hclge_vport *vport = hclge_get_vport(handle);
5201 	struct hclge_dev *hdev = vport->back;
5202 
5203 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
5204 
5205 	del_timer_sync(&hdev->service_timer);
5206 	cancel_work_sync(&hdev->service_task);
5207 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5208 
5209 	/* If it is not PF reset, the firmware will disable the MAC,
5210 	 * so it only need to stop phy here.
5211 	 */
5212 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5213 	    hdev->reset_type != HNAE3_FUNC_RESET) {
5214 		hclge_mac_stop_phy(hdev);
5215 		return;
5216 	}
5217 
5218 	/* Mac disable */
5219 	hclge_cfg_mac_mode(hdev, false);
5220 
5221 	hclge_mac_stop_phy(hdev);
5222 
5223 	/* reset tqp stats */
5224 	hclge_reset_tqp_stats(handle);
5225 	del_timer_sync(&hdev->service_timer);
5226 	cancel_work_sync(&hdev->service_task);
5227 	hclge_update_link_status(hdev);
5228 }
5229 
5230 int hclge_vport_start(struct hclge_vport *vport)
5231 {
5232 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5233 	vport->last_active_jiffies = jiffies;
5234 	return 0;
5235 }
5236 
5237 void hclge_vport_stop(struct hclge_vport *vport)
5238 {
5239 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5240 }
5241 
5242 static int hclge_client_start(struct hnae3_handle *handle)
5243 {
5244 	struct hclge_vport *vport = hclge_get_vport(handle);
5245 
5246 	return hclge_vport_start(vport);
5247 }
5248 
5249 static void hclge_client_stop(struct hnae3_handle *handle)
5250 {
5251 	struct hclge_vport *vport = hclge_get_vport(handle);
5252 
5253 	hclge_vport_stop(vport);
5254 }
5255 
5256 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5257 					 u16 cmdq_resp, u8  resp_code,
5258 					 enum hclge_mac_vlan_tbl_opcode op)
5259 {
5260 	struct hclge_dev *hdev = vport->back;
5261 	int return_status = -EIO;
5262 
5263 	if (cmdq_resp) {
5264 		dev_err(&hdev->pdev->dev,
5265 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5266 			cmdq_resp);
5267 		return -EIO;
5268 	}
5269 
5270 	if (op == HCLGE_MAC_VLAN_ADD) {
5271 		if ((!resp_code) || (resp_code == 1)) {
5272 			return_status = 0;
5273 		} else if (resp_code == 2) {
5274 			return_status = -ENOSPC;
5275 			dev_err(&hdev->pdev->dev,
5276 				"add mac addr failed for uc_overflow.\n");
5277 		} else if (resp_code == 3) {
5278 			return_status = -ENOSPC;
5279 			dev_err(&hdev->pdev->dev,
5280 				"add mac addr failed for mc_overflow.\n");
5281 		} else {
5282 			dev_err(&hdev->pdev->dev,
5283 				"add mac addr failed for undefined, code=%d.\n",
5284 				resp_code);
5285 		}
5286 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
5287 		if (!resp_code) {
5288 			return_status = 0;
5289 		} else if (resp_code == 1) {
5290 			return_status = -ENOENT;
5291 			dev_dbg(&hdev->pdev->dev,
5292 				"remove mac addr failed for miss.\n");
5293 		} else {
5294 			dev_err(&hdev->pdev->dev,
5295 				"remove mac addr failed for undefined, code=%d.\n",
5296 				resp_code);
5297 		}
5298 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
5299 		if (!resp_code) {
5300 			return_status = 0;
5301 		} else if (resp_code == 1) {
5302 			return_status = -ENOENT;
5303 			dev_dbg(&hdev->pdev->dev,
5304 				"lookup mac addr failed for miss.\n");
5305 		} else {
5306 			dev_err(&hdev->pdev->dev,
5307 				"lookup mac addr failed for undefined, code=%d.\n",
5308 				resp_code);
5309 		}
5310 	} else {
5311 		return_status = -EINVAL;
5312 		dev_err(&hdev->pdev->dev,
5313 			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5314 			op);
5315 	}
5316 
5317 	return return_status;
5318 }
5319 
5320 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5321 {
5322 	int word_num;
5323 	int bit_num;
5324 
5325 	if (vfid > 255 || vfid < 0)
5326 		return -EIO;
5327 
5328 	if (vfid >= 0 && vfid <= 191) {
5329 		word_num = vfid / 32;
5330 		bit_num  = vfid % 32;
5331 		if (clr)
5332 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5333 		else
5334 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5335 	} else {
5336 		word_num = (vfid - 192) / 32;
5337 		bit_num  = vfid % 32;
5338 		if (clr)
5339 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5340 		else
5341 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5342 	}
5343 
5344 	return 0;
5345 }
5346 
5347 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5348 {
5349 #define HCLGE_DESC_NUMBER 3
5350 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5351 	int i, j;
5352 
5353 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5354 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5355 			if (desc[i].data[j])
5356 				return false;
5357 
5358 	return true;
5359 }
5360 
5361 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5362 				   const u8 *addr)
5363 {
5364 	const unsigned char *mac_addr = addr;
5365 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5366 		       (mac_addr[0]) | (mac_addr[1] << 8);
5367 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5368 
5369 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5370 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5371 }
5372 
5373 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5374 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
5375 {
5376 	struct hclge_dev *hdev = vport->back;
5377 	struct hclge_desc desc;
5378 	u8 resp_code;
5379 	u16 retval;
5380 	int ret;
5381 
5382 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5383 
5384 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5385 
5386 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5387 	if (ret) {
5388 		dev_err(&hdev->pdev->dev,
5389 			"del mac addr failed for cmd_send, ret =%d.\n",
5390 			ret);
5391 		return ret;
5392 	}
5393 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5394 	retval = le16_to_cpu(desc.retval);
5395 
5396 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5397 					     HCLGE_MAC_VLAN_REMOVE);
5398 }
5399 
5400 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5401 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
5402 				     struct hclge_desc *desc,
5403 				     bool is_mc)
5404 {
5405 	struct hclge_dev *hdev = vport->back;
5406 	u8 resp_code;
5407 	u16 retval;
5408 	int ret;
5409 
5410 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5411 	if (is_mc) {
5412 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5413 		memcpy(desc[0].data,
5414 		       req,
5415 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5416 		hclge_cmd_setup_basic_desc(&desc[1],
5417 					   HCLGE_OPC_MAC_VLAN_ADD,
5418 					   true);
5419 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5420 		hclge_cmd_setup_basic_desc(&desc[2],
5421 					   HCLGE_OPC_MAC_VLAN_ADD,
5422 					   true);
5423 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
5424 	} else {
5425 		memcpy(desc[0].data,
5426 		       req,
5427 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5428 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
5429 	}
5430 	if (ret) {
5431 		dev_err(&hdev->pdev->dev,
5432 			"lookup mac addr failed for cmd_send, ret =%d.\n",
5433 			ret);
5434 		return ret;
5435 	}
5436 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5437 	retval = le16_to_cpu(desc[0].retval);
5438 
5439 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5440 					     HCLGE_MAC_VLAN_LKUP);
5441 }
5442 
5443 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5444 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
5445 				  struct hclge_desc *mc_desc)
5446 {
5447 	struct hclge_dev *hdev = vport->back;
5448 	int cfg_status;
5449 	u8 resp_code;
5450 	u16 retval;
5451 	int ret;
5452 
5453 	if (!mc_desc) {
5454 		struct hclge_desc desc;
5455 
5456 		hclge_cmd_setup_basic_desc(&desc,
5457 					   HCLGE_OPC_MAC_VLAN_ADD,
5458 					   false);
5459 		memcpy(desc.data, req,
5460 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5461 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5462 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5463 		retval = le16_to_cpu(desc.retval);
5464 
5465 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5466 							   resp_code,
5467 							   HCLGE_MAC_VLAN_ADD);
5468 	} else {
5469 		hclge_cmd_reuse_desc(&mc_desc[0], false);
5470 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5471 		hclge_cmd_reuse_desc(&mc_desc[1], false);
5472 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5473 		hclge_cmd_reuse_desc(&mc_desc[2], false);
5474 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5475 		memcpy(mc_desc[0].data, req,
5476 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5477 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5478 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5479 		retval = le16_to_cpu(mc_desc[0].retval);
5480 
5481 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5482 							   resp_code,
5483 							   HCLGE_MAC_VLAN_ADD);
5484 	}
5485 
5486 	if (ret) {
5487 		dev_err(&hdev->pdev->dev,
5488 			"add mac addr failed for cmd_send, ret =%d.\n",
5489 			ret);
5490 		return ret;
5491 	}
5492 
5493 	return cfg_status;
5494 }
5495 
5496 static int hclge_init_umv_space(struct hclge_dev *hdev)
5497 {
5498 	u16 allocated_size = 0;
5499 	int ret;
5500 
5501 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5502 				  true);
5503 	if (ret)
5504 		return ret;
5505 
5506 	if (allocated_size < hdev->wanted_umv_size)
5507 		dev_warn(&hdev->pdev->dev,
5508 			 "Alloc umv space failed, want %d, get %d\n",
5509 			 hdev->wanted_umv_size, allocated_size);
5510 
5511 	mutex_init(&hdev->umv_mutex);
5512 	hdev->max_umv_size = allocated_size;
5513 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5514 	hdev->share_umv_size = hdev->priv_umv_size +
5515 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5516 
5517 	return 0;
5518 }
5519 
5520 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5521 {
5522 	int ret;
5523 
5524 	if (hdev->max_umv_size > 0) {
5525 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5526 					  false);
5527 		if (ret)
5528 			return ret;
5529 		hdev->max_umv_size = 0;
5530 	}
5531 	mutex_destroy(&hdev->umv_mutex);
5532 
5533 	return 0;
5534 }
5535 
5536 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5537 			       u16 *allocated_size, bool is_alloc)
5538 {
5539 	struct hclge_umv_spc_alc_cmd *req;
5540 	struct hclge_desc desc;
5541 	int ret;
5542 
5543 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5544 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5545 	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5546 	req->space_size = cpu_to_le32(space_size);
5547 
5548 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5549 	if (ret) {
5550 		dev_err(&hdev->pdev->dev,
5551 			"%s umv space failed for cmd_send, ret =%d\n",
5552 			is_alloc ? "allocate" : "free", ret);
5553 		return ret;
5554 	}
5555 
5556 	if (is_alloc && allocated_size)
5557 		*allocated_size = le32_to_cpu(desc.data[1]);
5558 
5559 	return 0;
5560 }
5561 
5562 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5563 {
5564 	struct hclge_vport *vport;
5565 	int i;
5566 
5567 	for (i = 0; i < hdev->num_alloc_vport; i++) {
5568 		vport = &hdev->vport[i];
5569 		vport->used_umv_num = 0;
5570 	}
5571 
5572 	mutex_lock(&hdev->umv_mutex);
5573 	hdev->share_umv_size = hdev->priv_umv_size +
5574 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5575 	mutex_unlock(&hdev->umv_mutex);
5576 }
5577 
5578 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5579 {
5580 	struct hclge_dev *hdev = vport->back;
5581 	bool is_full;
5582 
5583 	mutex_lock(&hdev->umv_mutex);
5584 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5585 		   hdev->share_umv_size == 0);
5586 	mutex_unlock(&hdev->umv_mutex);
5587 
5588 	return is_full;
5589 }
5590 
5591 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5592 {
5593 	struct hclge_dev *hdev = vport->back;
5594 
5595 	mutex_lock(&hdev->umv_mutex);
5596 	if (is_free) {
5597 		if (vport->used_umv_num > hdev->priv_umv_size)
5598 			hdev->share_umv_size++;
5599 		vport->used_umv_num--;
5600 	} else {
5601 		if (vport->used_umv_num >= hdev->priv_umv_size)
5602 			hdev->share_umv_size--;
5603 		vport->used_umv_num++;
5604 	}
5605 	mutex_unlock(&hdev->umv_mutex);
5606 }
5607 
5608 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5609 			     const unsigned char *addr)
5610 {
5611 	struct hclge_vport *vport = hclge_get_vport(handle);
5612 
5613 	return hclge_add_uc_addr_common(vport, addr);
5614 }
5615 
5616 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5617 			     const unsigned char *addr)
5618 {
5619 	struct hclge_dev *hdev = vport->back;
5620 	struct hclge_mac_vlan_tbl_entry_cmd req;
5621 	struct hclge_desc desc;
5622 	u16 egress_port = 0;
5623 	int ret;
5624 
5625 	/* mac addr check */
5626 	if (is_zero_ether_addr(addr) ||
5627 	    is_broadcast_ether_addr(addr) ||
5628 	    is_multicast_ether_addr(addr)) {
5629 		dev_err(&hdev->pdev->dev,
5630 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5631 			 addr,
5632 			 is_zero_ether_addr(addr),
5633 			 is_broadcast_ether_addr(addr),
5634 			 is_multicast_ether_addr(addr));
5635 		return -EINVAL;
5636 	}
5637 
5638 	memset(&req, 0, sizeof(req));
5639 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5640 
5641 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5642 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5643 
5644 	req.egress_port = cpu_to_le16(egress_port);
5645 
5646 	hclge_prepare_mac_addr(&req, addr);
5647 
5648 	/* Lookup the mac address in the mac_vlan table, and add
5649 	 * it if the entry is inexistent. Repeated unicast entry
5650 	 * is not allowed in the mac vlan table.
5651 	 */
5652 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5653 	if (ret == -ENOENT) {
5654 		if (!hclge_is_umv_space_full(vport)) {
5655 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5656 			if (!ret)
5657 				hclge_update_umv_space(vport, false);
5658 			return ret;
5659 		}
5660 
5661 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5662 			hdev->priv_umv_size);
5663 
5664 		return -ENOSPC;
5665 	}
5666 
5667 	/* check if we just hit the duplicate */
5668 	if (!ret)
5669 		ret = -EINVAL;
5670 
5671 	dev_err(&hdev->pdev->dev,
5672 		"PF failed to add unicast entry(%pM) in the MAC table\n",
5673 		addr);
5674 
5675 	return ret;
5676 }
5677 
5678 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5679 			    const unsigned char *addr)
5680 {
5681 	struct hclge_vport *vport = hclge_get_vport(handle);
5682 
5683 	return hclge_rm_uc_addr_common(vport, addr);
5684 }
5685 
5686 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5687 			    const unsigned char *addr)
5688 {
5689 	struct hclge_dev *hdev = vport->back;
5690 	struct hclge_mac_vlan_tbl_entry_cmd req;
5691 	int ret;
5692 
5693 	/* mac addr check */
5694 	if (is_zero_ether_addr(addr) ||
5695 	    is_broadcast_ether_addr(addr) ||
5696 	    is_multicast_ether_addr(addr)) {
5697 		dev_dbg(&hdev->pdev->dev,
5698 			"Remove mac err! invalid mac:%pM.\n",
5699 			 addr);
5700 		return -EINVAL;
5701 	}
5702 
5703 	memset(&req, 0, sizeof(req));
5704 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5705 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5706 	hclge_prepare_mac_addr(&req, addr);
5707 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
5708 	if (!ret)
5709 		hclge_update_umv_space(vport, true);
5710 
5711 	return ret;
5712 }
5713 
5714 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5715 			     const unsigned char *addr)
5716 {
5717 	struct hclge_vport *vport = hclge_get_vport(handle);
5718 
5719 	return hclge_add_mc_addr_common(vport, addr);
5720 }
5721 
5722 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5723 			     const unsigned char *addr)
5724 {
5725 	struct hclge_dev *hdev = vport->back;
5726 	struct hclge_mac_vlan_tbl_entry_cmd req;
5727 	struct hclge_desc desc[3];
5728 	int status;
5729 
5730 	/* mac addr check */
5731 	if (!is_multicast_ether_addr(addr)) {
5732 		dev_err(&hdev->pdev->dev,
5733 			"Add mc mac err! invalid mac:%pM.\n",
5734 			 addr);
5735 		return -EINVAL;
5736 	}
5737 	memset(&req, 0, sizeof(req));
5738 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5739 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5740 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5741 	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5742 	hclge_prepare_mac_addr(&req, addr);
5743 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5744 	if (!status) {
5745 		/* This mac addr exist, update VFID for it */
5746 		hclge_update_desc_vfid(desc, vport->vport_id, false);
5747 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5748 	} else {
5749 		/* This mac addr do not exist, add new entry for it */
5750 		memset(desc[0].data, 0, sizeof(desc[0].data));
5751 		memset(desc[1].data, 0, sizeof(desc[0].data));
5752 		memset(desc[2].data, 0, sizeof(desc[0].data));
5753 		hclge_update_desc_vfid(desc, vport->vport_id, false);
5754 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5755 	}
5756 
5757 	if (status == -ENOSPC)
5758 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
5759 
5760 	return status;
5761 }
5762 
5763 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
5764 			    const unsigned char *addr)
5765 {
5766 	struct hclge_vport *vport = hclge_get_vport(handle);
5767 
5768 	return hclge_rm_mc_addr_common(vport, addr);
5769 }
5770 
5771 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
5772 			    const unsigned char *addr)
5773 {
5774 	struct hclge_dev *hdev = vport->back;
5775 	struct hclge_mac_vlan_tbl_entry_cmd req;
5776 	enum hclge_cmd_status status;
5777 	struct hclge_desc desc[3];
5778 
5779 	/* mac addr check */
5780 	if (!is_multicast_ether_addr(addr)) {
5781 		dev_dbg(&hdev->pdev->dev,
5782 			"Remove mc mac err! invalid mac:%pM.\n",
5783 			 addr);
5784 		return -EINVAL;
5785 	}
5786 
5787 	memset(&req, 0, sizeof(req));
5788 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5789 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5790 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5791 	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5792 	hclge_prepare_mac_addr(&req, addr);
5793 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5794 	if (!status) {
5795 		/* This mac addr exist, remove this handle's VFID for it */
5796 		hclge_update_desc_vfid(desc, vport->vport_id, true);
5797 
5798 		if (hclge_is_all_function_id_zero(desc))
5799 			/* All the vfid is zero, so need to delete this entry */
5800 			status = hclge_remove_mac_vlan_tbl(vport, &req);
5801 		else
5802 			/* Not all the vfid is zero, update the vfid */
5803 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5804 
5805 	} else {
5806 		/* Maybe this mac address is in mta table, but it cannot be
5807 		 * deleted here because an entry of mta represents an address
5808 		 * range rather than a specific address. the delete action to
5809 		 * all entries will take effect in update_mta_status called by
5810 		 * hns3_nic_set_rx_mode.
5811 		 */
5812 		status = 0;
5813 	}
5814 
5815 	return status;
5816 }
5817 
5818 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
5819 					      u16 cmdq_resp, u8 resp_code)
5820 {
5821 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
5822 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
5823 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
5824 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
5825 
5826 	int return_status;
5827 
5828 	if (cmdq_resp) {
5829 		dev_err(&hdev->pdev->dev,
5830 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
5831 			cmdq_resp);
5832 		return -EIO;
5833 	}
5834 
5835 	switch (resp_code) {
5836 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
5837 	case HCLGE_ETHERTYPE_ALREADY_ADD:
5838 		return_status = 0;
5839 		break;
5840 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
5841 		dev_err(&hdev->pdev->dev,
5842 			"add mac ethertype failed for manager table overflow.\n");
5843 		return_status = -EIO;
5844 		break;
5845 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
5846 		dev_err(&hdev->pdev->dev,
5847 			"add mac ethertype failed for key conflict.\n");
5848 		return_status = -EIO;
5849 		break;
5850 	default:
5851 		dev_err(&hdev->pdev->dev,
5852 			"add mac ethertype failed for undefined, code=%d.\n",
5853 			resp_code);
5854 		return_status = -EIO;
5855 	}
5856 
5857 	return return_status;
5858 }
5859 
5860 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
5861 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
5862 {
5863 	struct hclge_desc desc;
5864 	u8 resp_code;
5865 	u16 retval;
5866 	int ret;
5867 
5868 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
5869 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
5870 
5871 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5872 	if (ret) {
5873 		dev_err(&hdev->pdev->dev,
5874 			"add mac ethertype failed for cmd_send, ret =%d.\n",
5875 			ret);
5876 		return ret;
5877 	}
5878 
5879 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5880 	retval = le16_to_cpu(desc.retval);
5881 
5882 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
5883 }
5884 
5885 static int init_mgr_tbl(struct hclge_dev *hdev)
5886 {
5887 	int ret;
5888 	int i;
5889 
5890 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
5891 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
5892 		if (ret) {
5893 			dev_err(&hdev->pdev->dev,
5894 				"add mac ethertype failed, ret =%d.\n",
5895 				ret);
5896 			return ret;
5897 		}
5898 	}
5899 
5900 	return 0;
5901 }
5902 
5903 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
5904 {
5905 	struct hclge_vport *vport = hclge_get_vport(handle);
5906 	struct hclge_dev *hdev = vport->back;
5907 
5908 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
5909 }
5910 
5911 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
5912 			      bool is_first)
5913 {
5914 	const unsigned char *new_addr = (const unsigned char *)p;
5915 	struct hclge_vport *vport = hclge_get_vport(handle);
5916 	struct hclge_dev *hdev = vport->back;
5917 	int ret;
5918 
5919 	/* mac addr check */
5920 	if (is_zero_ether_addr(new_addr) ||
5921 	    is_broadcast_ether_addr(new_addr) ||
5922 	    is_multicast_ether_addr(new_addr)) {
5923 		dev_err(&hdev->pdev->dev,
5924 			"Change uc mac err! invalid mac:%p.\n",
5925 			 new_addr);
5926 		return -EINVAL;
5927 	}
5928 
5929 	if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
5930 		dev_warn(&hdev->pdev->dev,
5931 			 "remove old uc mac address fail.\n");
5932 
5933 	ret = hclge_add_uc_addr(handle, new_addr);
5934 	if (ret) {
5935 		dev_err(&hdev->pdev->dev,
5936 			"add uc mac address fail, ret =%d.\n",
5937 			ret);
5938 
5939 		if (!is_first &&
5940 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
5941 			dev_err(&hdev->pdev->dev,
5942 				"restore uc mac address fail.\n");
5943 
5944 		return -EIO;
5945 	}
5946 
5947 	ret = hclge_pause_addr_cfg(hdev, new_addr);
5948 	if (ret) {
5949 		dev_err(&hdev->pdev->dev,
5950 			"configure mac pause address fail, ret =%d.\n",
5951 			ret);
5952 		return -EIO;
5953 	}
5954 
5955 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
5956 
5957 	return 0;
5958 }
5959 
5960 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
5961 			  int cmd)
5962 {
5963 	struct hclge_vport *vport = hclge_get_vport(handle);
5964 	struct hclge_dev *hdev = vport->back;
5965 
5966 	if (!hdev->hw.mac.phydev)
5967 		return -EOPNOTSUPP;
5968 
5969 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
5970 }
5971 
5972 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
5973 				      u8 fe_type, bool filter_en)
5974 {
5975 	struct hclge_vlan_filter_ctrl_cmd *req;
5976 	struct hclge_desc desc;
5977 	int ret;
5978 
5979 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
5980 
5981 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
5982 	req->vlan_type = vlan_type;
5983 	req->vlan_fe = filter_en ? fe_type : 0;
5984 
5985 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5986 	if (ret)
5987 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
5988 			ret);
5989 
5990 	return ret;
5991 }
5992 
5993 #define HCLGE_FILTER_TYPE_VF		0
5994 #define HCLGE_FILTER_TYPE_PORT		1
5995 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
5996 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
5997 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
5998 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
5999 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
6000 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
6001 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
6002 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
6003 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
6004 
6005 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6006 {
6007 	struct hclge_vport *vport = hclge_get_vport(handle);
6008 	struct hclge_dev *hdev = vport->back;
6009 
6010 	if (hdev->pdev->revision >= 0x21) {
6011 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6012 					   HCLGE_FILTER_FE_EGRESS, enable);
6013 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6014 					   HCLGE_FILTER_FE_INGRESS, enable);
6015 	} else {
6016 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6017 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable);
6018 	}
6019 	if (enable)
6020 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
6021 	else
6022 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6023 }
6024 
6025 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6026 				    bool is_kill, u16 vlan, u8 qos,
6027 				    __be16 proto)
6028 {
6029 #define HCLGE_MAX_VF_BYTES  16
6030 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
6031 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
6032 	struct hclge_desc desc[2];
6033 	u8 vf_byte_val;
6034 	u8 vf_byte_off;
6035 	int ret;
6036 
6037 	hclge_cmd_setup_basic_desc(&desc[0],
6038 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6039 	hclge_cmd_setup_basic_desc(&desc[1],
6040 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6041 
6042 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6043 
6044 	vf_byte_off = vfid / 8;
6045 	vf_byte_val = 1 << (vfid % 8);
6046 
6047 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6048 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6049 
6050 	req0->vlan_id  = cpu_to_le16(vlan);
6051 	req0->vlan_cfg = is_kill;
6052 
6053 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6054 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6055 	else
6056 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6057 
6058 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
6059 	if (ret) {
6060 		dev_err(&hdev->pdev->dev,
6061 			"Send vf vlan command fail, ret =%d.\n",
6062 			ret);
6063 		return ret;
6064 	}
6065 
6066 	if (!is_kill) {
6067 #define HCLGE_VF_VLAN_NO_ENTRY	2
6068 		if (!req0->resp_code || req0->resp_code == 1)
6069 			return 0;
6070 
6071 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6072 			dev_warn(&hdev->pdev->dev,
6073 				 "vf vlan table is full, vf vlan filter is disabled\n");
6074 			return 0;
6075 		}
6076 
6077 		dev_err(&hdev->pdev->dev,
6078 			"Add vf vlan filter fail, ret =%d.\n",
6079 			req0->resp_code);
6080 	} else {
6081 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
6082 		if (!req0->resp_code)
6083 			return 0;
6084 
6085 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6086 			dev_warn(&hdev->pdev->dev,
6087 				 "vlan %d filter is not in vf vlan table\n",
6088 				 vlan);
6089 			return 0;
6090 		}
6091 
6092 		dev_err(&hdev->pdev->dev,
6093 			"Kill vf vlan filter fail, ret =%d.\n",
6094 			req0->resp_code);
6095 	}
6096 
6097 	return -EIO;
6098 }
6099 
6100 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6101 				      u16 vlan_id, bool is_kill)
6102 {
6103 	struct hclge_vlan_filter_pf_cfg_cmd *req;
6104 	struct hclge_desc desc;
6105 	u8 vlan_offset_byte_val;
6106 	u8 vlan_offset_byte;
6107 	u8 vlan_offset_160;
6108 	int ret;
6109 
6110 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6111 
6112 	vlan_offset_160 = vlan_id / 160;
6113 	vlan_offset_byte = (vlan_id % 160) / 8;
6114 	vlan_offset_byte_val = 1 << (vlan_id % 8);
6115 
6116 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6117 	req->vlan_offset = vlan_offset_160;
6118 	req->vlan_cfg = is_kill;
6119 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6120 
6121 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6122 	if (ret)
6123 		dev_err(&hdev->pdev->dev,
6124 			"port vlan command, send fail, ret =%d.\n", ret);
6125 	return ret;
6126 }
6127 
6128 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6129 				    u16 vport_id, u16 vlan_id, u8 qos,
6130 				    bool is_kill)
6131 {
6132 	u16 vport_idx, vport_num = 0;
6133 	int ret;
6134 
6135 	if (is_kill && !vlan_id)
6136 		return 0;
6137 
6138 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6139 				       0, proto);
6140 	if (ret) {
6141 		dev_err(&hdev->pdev->dev,
6142 			"Set %d vport vlan filter config fail, ret =%d.\n",
6143 			vport_id, ret);
6144 		return ret;
6145 	}
6146 
6147 	/* vlan 0 may be added twice when 8021q module is enabled */
6148 	if (!is_kill && !vlan_id &&
6149 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
6150 		return 0;
6151 
6152 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6153 		dev_err(&hdev->pdev->dev,
6154 			"Add port vlan failed, vport %d is already in vlan %d\n",
6155 			vport_id, vlan_id);
6156 		return -EINVAL;
6157 	}
6158 
6159 	if (is_kill &&
6160 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6161 		dev_err(&hdev->pdev->dev,
6162 			"Delete port vlan failed, vport %d is not in vlan %d\n",
6163 			vport_id, vlan_id);
6164 		return -EINVAL;
6165 	}
6166 
6167 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6168 		vport_num++;
6169 
6170 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6171 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6172 						 is_kill);
6173 
6174 	return ret;
6175 }
6176 
6177 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6178 			  u16 vlan_id, bool is_kill)
6179 {
6180 	struct hclge_vport *vport = hclge_get_vport(handle);
6181 	struct hclge_dev *hdev = vport->back;
6182 
6183 	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6184 					0, is_kill);
6185 }
6186 
6187 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6188 				    u16 vlan, u8 qos, __be16 proto)
6189 {
6190 	struct hclge_vport *vport = hclge_get_vport(handle);
6191 	struct hclge_dev *hdev = vport->back;
6192 
6193 	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6194 		return -EINVAL;
6195 	if (proto != htons(ETH_P_8021Q))
6196 		return -EPROTONOSUPPORT;
6197 
6198 	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6199 }
6200 
6201 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6202 {
6203 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6204 	struct hclge_vport_vtag_tx_cfg_cmd *req;
6205 	struct hclge_dev *hdev = vport->back;
6206 	struct hclge_desc desc;
6207 	int status;
6208 
6209 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6210 
6211 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6212 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6213 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6214 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6215 		      vcfg->accept_tag1 ? 1 : 0);
6216 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6217 		      vcfg->accept_untag1 ? 1 : 0);
6218 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6219 		      vcfg->accept_tag2 ? 1 : 0);
6220 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6221 		      vcfg->accept_untag2 ? 1 : 0);
6222 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6223 		      vcfg->insert_tag1_en ? 1 : 0);
6224 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6225 		      vcfg->insert_tag2_en ? 1 : 0);
6226 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6227 
6228 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6229 	req->vf_bitmap[req->vf_offset] =
6230 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6231 
6232 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6233 	if (status)
6234 		dev_err(&hdev->pdev->dev,
6235 			"Send port txvlan cfg command fail, ret =%d\n",
6236 			status);
6237 
6238 	return status;
6239 }
6240 
6241 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6242 {
6243 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6244 	struct hclge_vport_vtag_rx_cfg_cmd *req;
6245 	struct hclge_dev *hdev = vport->back;
6246 	struct hclge_desc desc;
6247 	int status;
6248 
6249 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6250 
6251 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6252 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6253 		      vcfg->strip_tag1_en ? 1 : 0);
6254 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6255 		      vcfg->strip_tag2_en ? 1 : 0);
6256 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6257 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
6258 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6259 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
6260 
6261 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6262 	req->vf_bitmap[req->vf_offset] =
6263 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6264 
6265 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6266 	if (status)
6267 		dev_err(&hdev->pdev->dev,
6268 			"Send port rxvlan cfg command fail, ret =%d\n",
6269 			status);
6270 
6271 	return status;
6272 }
6273 
6274 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6275 {
6276 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6277 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6278 	struct hclge_desc desc;
6279 	int status;
6280 
6281 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6282 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6283 	rx_req->ot_fst_vlan_type =
6284 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6285 	rx_req->ot_sec_vlan_type =
6286 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6287 	rx_req->in_fst_vlan_type =
6288 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6289 	rx_req->in_sec_vlan_type =
6290 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6291 
6292 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6293 	if (status) {
6294 		dev_err(&hdev->pdev->dev,
6295 			"Send rxvlan protocol type command fail, ret =%d\n",
6296 			status);
6297 		return status;
6298 	}
6299 
6300 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6301 
6302 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6303 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6304 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6305 
6306 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6307 	if (status)
6308 		dev_err(&hdev->pdev->dev,
6309 			"Send txvlan protocol type command fail, ret =%d\n",
6310 			status);
6311 
6312 	return status;
6313 }
6314 
6315 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6316 {
6317 #define HCLGE_DEF_VLAN_TYPE		0x8100
6318 
6319 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6320 	struct hclge_vport *vport;
6321 	int ret;
6322 	int i;
6323 
6324 	if (hdev->pdev->revision >= 0x21) {
6325 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6326 						 HCLGE_FILTER_FE_EGRESS, true);
6327 		if (ret)
6328 			return ret;
6329 
6330 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6331 						 HCLGE_FILTER_FE_INGRESS, true);
6332 		if (ret)
6333 			return ret;
6334 	} else {
6335 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6336 						 HCLGE_FILTER_FE_EGRESS_V1_B,
6337 						 true);
6338 		if (ret)
6339 			return ret;
6340 	}
6341 
6342 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
6343 
6344 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6345 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6346 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6347 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6348 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6349 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6350 
6351 	ret = hclge_set_vlan_protocol_type(hdev);
6352 	if (ret)
6353 		return ret;
6354 
6355 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6356 		vport = &hdev->vport[i];
6357 		vport->txvlan_cfg.accept_tag1 = true;
6358 		vport->txvlan_cfg.accept_untag1 = true;
6359 
6360 		/* accept_tag2 and accept_untag2 are not supported on
6361 		 * pdev revision(0x20), new revision support them. The
6362 		 * value of this two fields will not return error when driver
6363 		 * send command to fireware in revision(0x20).
6364 		 * This two fields can not configured by user.
6365 		 */
6366 		vport->txvlan_cfg.accept_tag2 = true;
6367 		vport->txvlan_cfg.accept_untag2 = true;
6368 
6369 		vport->txvlan_cfg.insert_tag1_en = false;
6370 		vport->txvlan_cfg.insert_tag2_en = false;
6371 		vport->txvlan_cfg.default_tag1 = 0;
6372 		vport->txvlan_cfg.default_tag2 = 0;
6373 
6374 		ret = hclge_set_vlan_tx_offload_cfg(vport);
6375 		if (ret)
6376 			return ret;
6377 
6378 		vport->rxvlan_cfg.strip_tag1_en = false;
6379 		vport->rxvlan_cfg.strip_tag2_en = true;
6380 		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6381 		vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6382 
6383 		ret = hclge_set_vlan_rx_offload_cfg(vport);
6384 		if (ret)
6385 			return ret;
6386 	}
6387 
6388 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6389 }
6390 
6391 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6392 {
6393 	struct hclge_vport *vport = hclge_get_vport(handle);
6394 
6395 	vport->rxvlan_cfg.strip_tag1_en = false;
6396 	vport->rxvlan_cfg.strip_tag2_en = enable;
6397 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6398 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6399 
6400 	return hclge_set_vlan_rx_offload_cfg(vport);
6401 }
6402 
6403 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6404 {
6405 	struct hclge_config_max_frm_size_cmd *req;
6406 	struct hclge_desc desc;
6407 
6408 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6409 
6410 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6411 	req->max_frm_size = cpu_to_le16(new_mps);
6412 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6413 
6414 	return hclge_cmd_send(&hdev->hw, &desc, 1);
6415 }
6416 
6417 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6418 {
6419 	struct hclge_vport *vport = hclge_get_vport(handle);
6420 
6421 	return hclge_set_vport_mtu(vport, new_mtu);
6422 }
6423 
6424 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6425 {
6426 	struct hclge_dev *hdev = vport->back;
6427 	int i, max_frm_size, ret = 0;
6428 
6429 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6430 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6431 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
6432 		return -EINVAL;
6433 
6434 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6435 	mutex_lock(&hdev->vport_lock);
6436 	/* VF's mps must fit within hdev->mps */
6437 	if (vport->vport_id && max_frm_size > hdev->mps) {
6438 		mutex_unlock(&hdev->vport_lock);
6439 		return -EINVAL;
6440 	} else if (vport->vport_id) {
6441 		vport->mps = max_frm_size;
6442 		mutex_unlock(&hdev->vport_lock);
6443 		return 0;
6444 	}
6445 
6446 	/* PF's mps must be greater then VF's mps */
6447 	for (i = 1; i < hdev->num_alloc_vport; i++)
6448 		if (max_frm_size < hdev->vport[i].mps) {
6449 			mutex_unlock(&hdev->vport_lock);
6450 			return -EINVAL;
6451 		}
6452 
6453 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6454 
6455 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
6456 	if (ret) {
6457 		dev_err(&hdev->pdev->dev,
6458 			"Change mtu fail, ret =%d\n", ret);
6459 		goto out;
6460 	}
6461 
6462 	hdev->mps = max_frm_size;
6463 	vport->mps = max_frm_size;
6464 
6465 	ret = hclge_buffer_alloc(hdev);
6466 	if (ret)
6467 		dev_err(&hdev->pdev->dev,
6468 			"Allocate buffer fail, ret =%d\n", ret);
6469 
6470 out:
6471 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6472 	mutex_unlock(&hdev->vport_lock);
6473 	return ret;
6474 }
6475 
6476 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6477 				    bool enable)
6478 {
6479 	struct hclge_reset_tqp_queue_cmd *req;
6480 	struct hclge_desc desc;
6481 	int ret;
6482 
6483 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6484 
6485 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6486 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6487 	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6488 
6489 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6490 	if (ret) {
6491 		dev_err(&hdev->pdev->dev,
6492 			"Send tqp reset cmd error, status =%d\n", ret);
6493 		return ret;
6494 	}
6495 
6496 	return 0;
6497 }
6498 
6499 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6500 {
6501 	struct hclge_reset_tqp_queue_cmd *req;
6502 	struct hclge_desc desc;
6503 	int ret;
6504 
6505 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6506 
6507 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6508 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6509 
6510 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6511 	if (ret) {
6512 		dev_err(&hdev->pdev->dev,
6513 			"Get reset status error, status =%d\n", ret);
6514 		return ret;
6515 	}
6516 
6517 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6518 }
6519 
6520 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
6521 					  u16 queue_id)
6522 {
6523 	struct hnae3_queue *queue;
6524 	struct hclge_tqp *tqp;
6525 
6526 	queue = handle->kinfo.tqp[queue_id];
6527 	tqp = container_of(queue, struct hclge_tqp, q);
6528 
6529 	return tqp->index;
6530 }
6531 
6532 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6533 {
6534 	struct hclge_vport *vport = hclge_get_vport(handle);
6535 	struct hclge_dev *hdev = vport->back;
6536 	int reset_try_times = 0;
6537 	int reset_status;
6538 	u16 queue_gid;
6539 	int ret = 0;
6540 
6541 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
6542 
6543 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6544 	if (ret) {
6545 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
6546 		return ret;
6547 	}
6548 
6549 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6550 	if (ret) {
6551 		dev_err(&hdev->pdev->dev,
6552 			"Send reset tqp cmd fail, ret = %d\n", ret);
6553 		return ret;
6554 	}
6555 
6556 	reset_try_times = 0;
6557 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6558 		/* Wait for tqp hw reset */
6559 		msleep(20);
6560 		reset_status = hclge_get_reset_status(hdev, queue_gid);
6561 		if (reset_status)
6562 			break;
6563 	}
6564 
6565 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6566 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
6567 		return ret;
6568 	}
6569 
6570 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6571 	if (ret)
6572 		dev_err(&hdev->pdev->dev,
6573 			"Deassert the soft reset fail, ret = %d\n", ret);
6574 
6575 	return ret;
6576 }
6577 
6578 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
6579 {
6580 	struct hclge_dev *hdev = vport->back;
6581 	int reset_try_times = 0;
6582 	int reset_status;
6583 	u16 queue_gid;
6584 	int ret;
6585 
6586 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
6587 
6588 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6589 	if (ret) {
6590 		dev_warn(&hdev->pdev->dev,
6591 			 "Send reset tqp cmd fail, ret = %d\n", ret);
6592 		return;
6593 	}
6594 
6595 	reset_try_times = 0;
6596 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6597 		/* Wait for tqp hw reset */
6598 		msleep(20);
6599 		reset_status = hclge_get_reset_status(hdev, queue_gid);
6600 		if (reset_status)
6601 			break;
6602 	}
6603 
6604 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6605 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
6606 		return;
6607 	}
6608 
6609 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6610 	if (ret)
6611 		dev_warn(&hdev->pdev->dev,
6612 			 "Deassert the soft reset fail, ret = %d\n", ret);
6613 }
6614 
6615 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
6616 {
6617 	struct hclge_vport *vport = hclge_get_vport(handle);
6618 	struct hclge_dev *hdev = vport->back;
6619 
6620 	return hdev->fw_version;
6621 }
6622 
6623 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6624 {
6625 	struct phy_device *phydev = hdev->hw.mac.phydev;
6626 
6627 	if (!phydev)
6628 		return;
6629 
6630 	phy_set_asym_pause(phydev, rx_en, tx_en);
6631 }
6632 
6633 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6634 {
6635 	int ret;
6636 
6637 	if (rx_en && tx_en)
6638 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
6639 	else if (rx_en && !tx_en)
6640 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
6641 	else if (!rx_en && tx_en)
6642 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
6643 	else
6644 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
6645 
6646 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
6647 		return 0;
6648 
6649 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
6650 	if (ret) {
6651 		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
6652 			ret);
6653 		return ret;
6654 	}
6655 
6656 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
6657 
6658 	return 0;
6659 }
6660 
6661 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
6662 {
6663 	struct phy_device *phydev = hdev->hw.mac.phydev;
6664 	u16 remote_advertising = 0;
6665 	u16 local_advertising = 0;
6666 	u32 rx_pause, tx_pause;
6667 	u8 flowctl;
6668 
6669 	if (!phydev->link || !phydev->autoneg)
6670 		return 0;
6671 
6672 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
6673 
6674 	if (phydev->pause)
6675 		remote_advertising = LPA_PAUSE_CAP;
6676 
6677 	if (phydev->asym_pause)
6678 		remote_advertising |= LPA_PAUSE_ASYM;
6679 
6680 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
6681 					   remote_advertising);
6682 	tx_pause = flowctl & FLOW_CTRL_TX;
6683 	rx_pause = flowctl & FLOW_CTRL_RX;
6684 
6685 	if (phydev->duplex == HCLGE_MAC_HALF) {
6686 		tx_pause = 0;
6687 		rx_pause = 0;
6688 	}
6689 
6690 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
6691 }
6692 
6693 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
6694 				 u32 *rx_en, u32 *tx_en)
6695 {
6696 	struct hclge_vport *vport = hclge_get_vport(handle);
6697 	struct hclge_dev *hdev = vport->back;
6698 
6699 	*auto_neg = hclge_get_autoneg(handle);
6700 
6701 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6702 		*rx_en = 0;
6703 		*tx_en = 0;
6704 		return;
6705 	}
6706 
6707 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
6708 		*rx_en = 1;
6709 		*tx_en = 0;
6710 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
6711 		*tx_en = 1;
6712 		*rx_en = 0;
6713 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
6714 		*rx_en = 1;
6715 		*tx_en = 1;
6716 	} else {
6717 		*rx_en = 0;
6718 		*tx_en = 0;
6719 	}
6720 }
6721 
6722 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
6723 				u32 rx_en, u32 tx_en)
6724 {
6725 	struct hclge_vport *vport = hclge_get_vport(handle);
6726 	struct hclge_dev *hdev = vport->back;
6727 	struct phy_device *phydev = hdev->hw.mac.phydev;
6728 	u32 fc_autoneg;
6729 
6730 	fc_autoneg = hclge_get_autoneg(handle);
6731 	if (auto_neg != fc_autoneg) {
6732 		dev_info(&hdev->pdev->dev,
6733 			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
6734 		return -EOPNOTSUPP;
6735 	}
6736 
6737 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6738 		dev_info(&hdev->pdev->dev,
6739 			 "Priority flow control enabled. Cannot set link flow control.\n");
6740 		return -EOPNOTSUPP;
6741 	}
6742 
6743 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
6744 
6745 	if (!fc_autoneg)
6746 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
6747 
6748 	/* Only support flow control negotiation for netdev with
6749 	 * phy attached for now.
6750 	 */
6751 	if (!phydev)
6752 		return -EOPNOTSUPP;
6753 
6754 	return phy_start_aneg(phydev);
6755 }
6756 
6757 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
6758 					  u8 *auto_neg, u32 *speed, u8 *duplex)
6759 {
6760 	struct hclge_vport *vport = hclge_get_vport(handle);
6761 	struct hclge_dev *hdev = vport->back;
6762 
6763 	if (speed)
6764 		*speed = hdev->hw.mac.speed;
6765 	if (duplex)
6766 		*duplex = hdev->hw.mac.duplex;
6767 	if (auto_neg)
6768 		*auto_neg = hdev->hw.mac.autoneg;
6769 }
6770 
6771 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
6772 {
6773 	struct hclge_vport *vport = hclge_get_vport(handle);
6774 	struct hclge_dev *hdev = vport->back;
6775 
6776 	if (media_type)
6777 		*media_type = hdev->hw.mac.media_type;
6778 }
6779 
6780 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
6781 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
6782 {
6783 	struct hclge_vport *vport = hclge_get_vport(handle);
6784 	struct hclge_dev *hdev = vport->back;
6785 	struct phy_device *phydev = hdev->hw.mac.phydev;
6786 	int mdix_ctrl, mdix, retval, is_resolved;
6787 
6788 	if (!phydev) {
6789 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6790 		*tp_mdix = ETH_TP_MDI_INVALID;
6791 		return;
6792 	}
6793 
6794 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
6795 
6796 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
6797 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
6798 				    HCLGE_PHY_MDIX_CTRL_S);
6799 
6800 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
6801 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
6802 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
6803 
6804 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
6805 
6806 	switch (mdix_ctrl) {
6807 	case 0x0:
6808 		*tp_mdix_ctrl = ETH_TP_MDI;
6809 		break;
6810 	case 0x1:
6811 		*tp_mdix_ctrl = ETH_TP_MDI_X;
6812 		break;
6813 	case 0x3:
6814 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
6815 		break;
6816 	default:
6817 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6818 		break;
6819 	}
6820 
6821 	if (!is_resolved)
6822 		*tp_mdix = ETH_TP_MDI_INVALID;
6823 	else if (mdix)
6824 		*tp_mdix = ETH_TP_MDI_X;
6825 	else
6826 		*tp_mdix = ETH_TP_MDI;
6827 }
6828 
6829 static int hclge_init_instance_hw(struct hclge_dev *hdev)
6830 {
6831 	return hclge_mac_connect_phy(hdev);
6832 }
6833 
6834 static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
6835 {
6836 	hclge_mac_disconnect_phy(hdev);
6837 }
6838 
6839 static int hclge_init_client_instance(struct hnae3_client *client,
6840 				      struct hnae3_ae_dev *ae_dev)
6841 {
6842 	struct hclge_dev *hdev = ae_dev->priv;
6843 	struct hclge_vport *vport;
6844 	int i, ret;
6845 
6846 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
6847 		vport = &hdev->vport[i];
6848 
6849 		switch (client->type) {
6850 		case HNAE3_CLIENT_KNIC:
6851 
6852 			hdev->nic_client = client;
6853 			vport->nic.client = client;
6854 			ret = client->ops->init_instance(&vport->nic);
6855 			if (ret)
6856 				goto clear_nic;
6857 
6858 			ret = hclge_init_instance_hw(hdev);
6859 			if (ret) {
6860 			        client->ops->uninit_instance(&vport->nic,
6861 			                                     0);
6862 				goto clear_nic;
6863 			}
6864 
6865 			hnae3_set_client_init_flag(client, ae_dev, 1);
6866 
6867 			if (hdev->roce_client &&
6868 			    hnae3_dev_roce_supported(hdev)) {
6869 				struct hnae3_client *rc = hdev->roce_client;
6870 
6871 				ret = hclge_init_roce_base_info(vport);
6872 				if (ret)
6873 					goto clear_roce;
6874 
6875 				ret = rc->ops->init_instance(&vport->roce);
6876 				if (ret)
6877 					goto clear_roce;
6878 
6879 				hnae3_set_client_init_flag(hdev->roce_client,
6880 							   ae_dev, 1);
6881 			}
6882 
6883 			break;
6884 		case HNAE3_CLIENT_UNIC:
6885 			hdev->nic_client = client;
6886 			vport->nic.client = client;
6887 
6888 			ret = client->ops->init_instance(&vport->nic);
6889 			if (ret)
6890 				goto clear_nic;
6891 
6892 			hnae3_set_client_init_flag(client, ae_dev, 1);
6893 
6894 			break;
6895 		case HNAE3_CLIENT_ROCE:
6896 			if (hnae3_dev_roce_supported(hdev)) {
6897 				hdev->roce_client = client;
6898 				vport->roce.client = client;
6899 			}
6900 
6901 			if (hdev->roce_client && hdev->nic_client) {
6902 				ret = hclge_init_roce_base_info(vport);
6903 				if (ret)
6904 					goto clear_roce;
6905 
6906 				ret = client->ops->init_instance(&vport->roce);
6907 				if (ret)
6908 					goto clear_roce;
6909 
6910 				hnae3_set_client_init_flag(client, ae_dev, 1);
6911 			}
6912 
6913 			break;
6914 		default:
6915 			return -EINVAL;
6916 		}
6917 	}
6918 
6919 	return 0;
6920 
6921 clear_nic:
6922 	hdev->nic_client = NULL;
6923 	vport->nic.client = NULL;
6924 	return ret;
6925 clear_roce:
6926 	hdev->roce_client = NULL;
6927 	vport->roce.client = NULL;
6928 	return ret;
6929 }
6930 
6931 static void hclge_uninit_client_instance(struct hnae3_client *client,
6932 					 struct hnae3_ae_dev *ae_dev)
6933 {
6934 	struct hclge_dev *hdev = ae_dev->priv;
6935 	struct hclge_vport *vport;
6936 	int i;
6937 
6938 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
6939 		vport = &hdev->vport[i];
6940 		if (hdev->roce_client) {
6941 			hdev->roce_client->ops->uninit_instance(&vport->roce,
6942 								0);
6943 			hdev->roce_client = NULL;
6944 			vport->roce.client = NULL;
6945 		}
6946 		if (client->type == HNAE3_CLIENT_ROCE)
6947 			return;
6948 		if (hdev->nic_client && client->ops->uninit_instance) {
6949 			hclge_uninit_instance_hw(hdev);
6950 			client->ops->uninit_instance(&vport->nic, 0);
6951 			hdev->nic_client = NULL;
6952 			vport->nic.client = NULL;
6953 		}
6954 	}
6955 }
6956 
6957 static int hclge_pci_init(struct hclge_dev *hdev)
6958 {
6959 	struct pci_dev *pdev = hdev->pdev;
6960 	struct hclge_hw *hw;
6961 	int ret;
6962 
6963 	ret = pci_enable_device(pdev);
6964 	if (ret) {
6965 		dev_err(&pdev->dev, "failed to enable PCI device\n");
6966 		return ret;
6967 	}
6968 
6969 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6970 	if (ret) {
6971 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6972 		if (ret) {
6973 			dev_err(&pdev->dev,
6974 				"can't set consistent PCI DMA");
6975 			goto err_disable_device;
6976 		}
6977 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
6978 	}
6979 
6980 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
6981 	if (ret) {
6982 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
6983 		goto err_disable_device;
6984 	}
6985 
6986 	pci_set_master(pdev);
6987 	hw = &hdev->hw;
6988 	hw->io_base = pcim_iomap(pdev, 2, 0);
6989 	if (!hw->io_base) {
6990 		dev_err(&pdev->dev, "Can't map configuration register space\n");
6991 		ret = -ENOMEM;
6992 		goto err_clr_master;
6993 	}
6994 
6995 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
6996 
6997 	return 0;
6998 err_clr_master:
6999 	pci_clear_master(pdev);
7000 	pci_release_regions(pdev);
7001 err_disable_device:
7002 	pci_disable_device(pdev);
7003 
7004 	return ret;
7005 }
7006 
7007 static void hclge_pci_uninit(struct hclge_dev *hdev)
7008 {
7009 	struct pci_dev *pdev = hdev->pdev;
7010 
7011 	pcim_iounmap(pdev, hdev->hw.io_base);
7012 	pci_free_irq_vectors(pdev);
7013 	pci_clear_master(pdev);
7014 	pci_release_mem_regions(pdev);
7015 	pci_disable_device(pdev);
7016 }
7017 
7018 static void hclge_state_init(struct hclge_dev *hdev)
7019 {
7020 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7021 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7022 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7023 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7024 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7025 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7026 }
7027 
7028 static void hclge_state_uninit(struct hclge_dev *hdev)
7029 {
7030 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7031 
7032 	if (hdev->service_timer.function)
7033 		del_timer_sync(&hdev->service_timer);
7034 	if (hdev->reset_timer.function)
7035 		del_timer_sync(&hdev->reset_timer);
7036 	if (hdev->service_task.func)
7037 		cancel_work_sync(&hdev->service_task);
7038 	if (hdev->rst_service_task.func)
7039 		cancel_work_sync(&hdev->rst_service_task);
7040 	if (hdev->mbx_service_task.func)
7041 		cancel_work_sync(&hdev->mbx_service_task);
7042 }
7043 
7044 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7045 {
7046 #define HCLGE_FLR_WAIT_MS	100
7047 #define HCLGE_FLR_WAIT_CNT	50
7048 	struct hclge_dev *hdev = ae_dev->priv;
7049 	int cnt = 0;
7050 
7051 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7052 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7053 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7054 	hclge_reset_event(hdev->pdev, NULL);
7055 
7056 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7057 	       cnt++ < HCLGE_FLR_WAIT_CNT)
7058 		msleep(HCLGE_FLR_WAIT_MS);
7059 
7060 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7061 		dev_err(&hdev->pdev->dev,
7062 			"flr wait down timeout: %d\n", cnt);
7063 }
7064 
7065 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7066 {
7067 	struct hclge_dev *hdev = ae_dev->priv;
7068 
7069 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7070 }
7071 
7072 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7073 {
7074 	struct pci_dev *pdev = ae_dev->pdev;
7075 	struct hclge_dev *hdev;
7076 	int ret;
7077 
7078 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7079 	if (!hdev) {
7080 		ret = -ENOMEM;
7081 		goto out;
7082 	}
7083 
7084 	hdev->pdev = pdev;
7085 	hdev->ae_dev = ae_dev;
7086 	hdev->reset_type = HNAE3_NONE_RESET;
7087 	hdev->reset_level = HNAE3_FUNC_RESET;
7088 	ae_dev->priv = hdev;
7089 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7090 
7091 	mutex_init(&hdev->vport_lock);
7092 
7093 	ret = hclge_pci_init(hdev);
7094 	if (ret) {
7095 		dev_err(&pdev->dev, "PCI init failed\n");
7096 		goto out;
7097 	}
7098 
7099 	/* Firmware command queue initialize */
7100 	ret = hclge_cmd_queue_init(hdev);
7101 	if (ret) {
7102 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7103 		goto err_pci_uninit;
7104 	}
7105 
7106 	/* Firmware command initialize */
7107 	ret = hclge_cmd_init(hdev);
7108 	if (ret)
7109 		goto err_cmd_uninit;
7110 
7111 	ret = hclge_get_cap(hdev);
7112 	if (ret) {
7113 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7114 			ret);
7115 		goto err_cmd_uninit;
7116 	}
7117 
7118 	ret = hclge_configure(hdev);
7119 	if (ret) {
7120 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7121 		goto err_cmd_uninit;
7122 	}
7123 
7124 	ret = hclge_init_msi(hdev);
7125 	if (ret) {
7126 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7127 		goto err_cmd_uninit;
7128 	}
7129 
7130 	ret = hclge_misc_irq_init(hdev);
7131 	if (ret) {
7132 		dev_err(&pdev->dev,
7133 			"Misc IRQ(vector0) init error, ret = %d.\n",
7134 			ret);
7135 		goto err_msi_uninit;
7136 	}
7137 
7138 	ret = hclge_alloc_tqps(hdev);
7139 	if (ret) {
7140 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7141 		goto err_msi_irq_uninit;
7142 	}
7143 
7144 	ret = hclge_alloc_vport(hdev);
7145 	if (ret) {
7146 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7147 		goto err_msi_irq_uninit;
7148 	}
7149 
7150 	ret = hclge_map_tqp(hdev);
7151 	if (ret) {
7152 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7153 		goto err_msi_irq_uninit;
7154 	}
7155 
7156 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7157 		ret = hclge_mac_mdio_config(hdev);
7158 		if (ret) {
7159 			dev_err(&hdev->pdev->dev,
7160 				"mdio config fail ret=%d\n", ret);
7161 			goto err_msi_irq_uninit;
7162 		}
7163 	}
7164 
7165 	ret = hclge_init_umv_space(hdev);
7166 	if (ret) {
7167 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7168 		goto err_msi_irq_uninit;
7169 	}
7170 
7171 	ret = hclge_mac_init(hdev);
7172 	if (ret) {
7173 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7174 		goto err_mdiobus_unreg;
7175 	}
7176 
7177 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7178 	if (ret) {
7179 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7180 		goto err_mdiobus_unreg;
7181 	}
7182 
7183 	ret = hclge_config_gro(hdev, true);
7184 	if (ret)
7185 		goto err_mdiobus_unreg;
7186 
7187 	ret = hclge_init_vlan_config(hdev);
7188 	if (ret) {
7189 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7190 		goto err_mdiobus_unreg;
7191 	}
7192 
7193 	ret = hclge_tm_schd_init(hdev);
7194 	if (ret) {
7195 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7196 		goto err_mdiobus_unreg;
7197 	}
7198 
7199 	hclge_rss_init_cfg(hdev);
7200 	ret = hclge_rss_init_hw(hdev);
7201 	if (ret) {
7202 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7203 		goto err_mdiobus_unreg;
7204 	}
7205 
7206 	ret = init_mgr_tbl(hdev);
7207 	if (ret) {
7208 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7209 		goto err_mdiobus_unreg;
7210 	}
7211 
7212 	ret = hclge_init_fd_config(hdev);
7213 	if (ret) {
7214 		dev_err(&pdev->dev,
7215 			"fd table init fail, ret=%d\n", ret);
7216 		goto err_mdiobus_unreg;
7217 	}
7218 
7219 	ret = hclge_hw_error_set_state(hdev, true);
7220 	if (ret) {
7221 		dev_err(&pdev->dev,
7222 			"hw error interrupts enable failed, ret =%d\n", ret);
7223 		goto err_mdiobus_unreg;
7224 	}
7225 
7226 	hclge_dcb_ops_set(hdev);
7227 
7228 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7229 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7230 	INIT_WORK(&hdev->service_task, hclge_service_task);
7231 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7232 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7233 
7234 	hclge_clear_all_event_cause(hdev);
7235 
7236 	/* Enable MISC vector(vector0) */
7237 	hclge_enable_vector(&hdev->misc_vector, true);
7238 
7239 	hclge_state_init(hdev);
7240 	hdev->last_reset_time = jiffies;
7241 
7242 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7243 	return 0;
7244 
7245 err_mdiobus_unreg:
7246 	if (hdev->hw.mac.phydev)
7247 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
7248 err_msi_irq_uninit:
7249 	hclge_misc_irq_uninit(hdev);
7250 err_msi_uninit:
7251 	pci_free_irq_vectors(pdev);
7252 err_cmd_uninit:
7253 	hclge_destroy_cmd_queue(&hdev->hw);
7254 err_pci_uninit:
7255 	pcim_iounmap(pdev, hdev->hw.io_base);
7256 	pci_clear_master(pdev);
7257 	pci_release_regions(pdev);
7258 	pci_disable_device(pdev);
7259 out:
7260 	return ret;
7261 }
7262 
7263 static void hclge_stats_clear(struct hclge_dev *hdev)
7264 {
7265 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7266 }
7267 
7268 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7269 {
7270 	struct hclge_vport *vport = hdev->vport;
7271 	int i;
7272 
7273 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7274 		hclge_vport_start(vport);
7275 		vport++;
7276 	}
7277 }
7278 
7279 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7280 {
7281 	struct hclge_dev *hdev = ae_dev->priv;
7282 	struct pci_dev *pdev = ae_dev->pdev;
7283 	int ret;
7284 
7285 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7286 
7287 	hclge_stats_clear(hdev);
7288 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7289 
7290 	ret = hclge_cmd_init(hdev);
7291 	if (ret) {
7292 		dev_err(&pdev->dev, "Cmd queue init failed\n");
7293 		return ret;
7294 	}
7295 
7296 	ret = hclge_get_cap(hdev);
7297 	if (ret) {
7298 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7299 			ret);
7300 		return ret;
7301 	}
7302 
7303 	ret = hclge_configure(hdev);
7304 	if (ret) {
7305 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7306 		return ret;
7307 	}
7308 
7309 	ret = hclge_map_tqp(hdev);
7310 	if (ret) {
7311 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7312 		return ret;
7313 	}
7314 
7315 	hclge_reset_umv_space(hdev);
7316 
7317 	ret = hclge_mac_init(hdev);
7318 	if (ret) {
7319 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7320 		return ret;
7321 	}
7322 
7323 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7324 	if (ret) {
7325 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7326 		return ret;
7327 	}
7328 
7329 	ret = hclge_config_gro(hdev, true);
7330 	if (ret)
7331 		return ret;
7332 
7333 	ret = hclge_init_vlan_config(hdev);
7334 	if (ret) {
7335 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7336 		return ret;
7337 	}
7338 
7339 	ret = hclge_tm_init_hw(hdev);
7340 	if (ret) {
7341 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
7342 		return ret;
7343 	}
7344 
7345 	ret = hclge_rss_init_hw(hdev);
7346 	if (ret) {
7347 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7348 		return ret;
7349 	}
7350 
7351 	ret = hclge_init_fd_config(hdev);
7352 	if (ret) {
7353 		dev_err(&pdev->dev,
7354 			"fd table init fail, ret=%d\n", ret);
7355 		return ret;
7356 	}
7357 
7358 	/* Re-enable the TM hw error interrupts because
7359 	 * they get disabled on core/global reset.
7360 	 */
7361 	if (hclge_enable_tm_hw_error(hdev, true))
7362 		dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
7363 
7364 	hclge_reset_vport_state(hdev);
7365 
7366 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7367 		 HCLGE_DRIVER_NAME);
7368 
7369 	return 0;
7370 }
7371 
7372 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7373 {
7374 	struct hclge_dev *hdev = ae_dev->priv;
7375 	struct hclge_mac *mac = &hdev->hw.mac;
7376 
7377 	hclge_state_uninit(hdev);
7378 
7379 	if (mac->phydev)
7380 		mdiobus_unregister(mac->mdio_bus);
7381 
7382 	hclge_uninit_umv_space(hdev);
7383 
7384 	/* Disable MISC vector(vector0) */
7385 	hclge_enable_vector(&hdev->misc_vector, false);
7386 	synchronize_irq(hdev->misc_vector.vector_irq);
7387 
7388 	hclge_hw_error_set_state(hdev, false);
7389 	hclge_destroy_cmd_queue(&hdev->hw);
7390 	hclge_misc_irq_uninit(hdev);
7391 	hclge_pci_uninit(hdev);
7392 	mutex_destroy(&hdev->vport_lock);
7393 	ae_dev->priv = NULL;
7394 }
7395 
7396 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7397 {
7398 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7399 	struct hclge_vport *vport = hclge_get_vport(handle);
7400 	struct hclge_dev *hdev = vport->back;
7401 
7402 	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
7403 }
7404 
7405 static void hclge_get_channels(struct hnae3_handle *handle,
7406 			       struct ethtool_channels *ch)
7407 {
7408 	struct hclge_vport *vport = hclge_get_vport(handle);
7409 
7410 	ch->max_combined = hclge_get_max_channels(handle);
7411 	ch->other_count = 1;
7412 	ch->max_other = 1;
7413 	ch->combined_count = vport->alloc_tqps;
7414 }
7415 
7416 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7417 					u16 *alloc_tqps, u16 *max_rss_size)
7418 {
7419 	struct hclge_vport *vport = hclge_get_vport(handle);
7420 	struct hclge_dev *hdev = vport->back;
7421 
7422 	*alloc_tqps = vport->alloc_tqps;
7423 	*max_rss_size = hdev->rss_size_max;
7424 }
7425 
7426 static void hclge_release_tqp(struct hclge_vport *vport)
7427 {
7428 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7429 	struct hclge_dev *hdev = vport->back;
7430 	int i;
7431 
7432 	for (i = 0; i < kinfo->num_tqps; i++) {
7433 		struct hclge_tqp *tqp =
7434 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
7435 
7436 		tqp->q.handle = NULL;
7437 		tqp->q.tqp_index = 0;
7438 		tqp->alloced = false;
7439 	}
7440 
7441 	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
7442 	kinfo->tqp = NULL;
7443 }
7444 
7445 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
7446 {
7447 	struct hclge_vport *vport = hclge_get_vport(handle);
7448 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7449 	struct hclge_dev *hdev = vport->back;
7450 	int cur_rss_size = kinfo->rss_size;
7451 	int cur_tqps = kinfo->num_tqps;
7452 	u16 tc_offset[HCLGE_MAX_TC_NUM];
7453 	u16 tc_valid[HCLGE_MAX_TC_NUM];
7454 	u16 tc_size[HCLGE_MAX_TC_NUM];
7455 	u16 roundup_size;
7456 	u32 *rss_indir;
7457 	int ret, i;
7458 
7459 	/* Free old tqps, and reallocate with new tqp number when nic setup */
7460 	hclge_release_tqp(vport);
7461 
7462 	ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
7463 	if (ret) {
7464 		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
7465 		return ret;
7466 	}
7467 
7468 	ret = hclge_map_tqp_to_vport(hdev, vport);
7469 	if (ret) {
7470 		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
7471 		return ret;
7472 	}
7473 
7474 	ret = hclge_tm_schd_init(hdev);
7475 	if (ret) {
7476 		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
7477 		return ret;
7478 	}
7479 
7480 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
7481 	roundup_size = ilog2(roundup_size);
7482 	/* Set the RSS TC mode according to the new RSS size */
7483 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7484 		tc_valid[i] = 0;
7485 
7486 		if (!(hdev->hw_tc_map & BIT(i)))
7487 			continue;
7488 
7489 		tc_valid[i] = 1;
7490 		tc_size[i] = roundup_size;
7491 		tc_offset[i] = kinfo->rss_size * i;
7492 	}
7493 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7494 	if (ret)
7495 		return ret;
7496 
7497 	/* Reinitializes the rss indirect table according to the new RSS size */
7498 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7499 	if (!rss_indir)
7500 		return -ENOMEM;
7501 
7502 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7503 		rss_indir[i] = i % kinfo->rss_size;
7504 
7505 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7506 	if (ret)
7507 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7508 			ret);
7509 
7510 	kfree(rss_indir);
7511 
7512 	if (!ret)
7513 		dev_info(&hdev->pdev->dev,
7514 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
7515 			 cur_rss_size, kinfo->rss_size,
7516 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
7517 
7518 	return ret;
7519 }
7520 
7521 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7522 			      u32 *regs_num_64_bit)
7523 {
7524 	struct hclge_desc desc;
7525 	u32 total_num;
7526 	int ret;
7527 
7528 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
7529 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7530 	if (ret) {
7531 		dev_err(&hdev->pdev->dev,
7532 			"Query register number cmd failed, ret = %d.\n", ret);
7533 		return ret;
7534 	}
7535 
7536 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
7537 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
7538 
7539 	total_num = *regs_num_32_bit + *regs_num_64_bit;
7540 	if (!total_num)
7541 		return -EINVAL;
7542 
7543 	return 0;
7544 }
7545 
7546 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7547 				 void *data)
7548 {
7549 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
7550 
7551 	struct hclge_desc *desc;
7552 	u32 *reg_val = data;
7553 	__le32 *desc_data;
7554 	int cmd_num;
7555 	int i, k, n;
7556 	int ret;
7557 
7558 	if (regs_num == 0)
7559 		return 0;
7560 
7561 	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
7562 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7563 	if (!desc)
7564 		return -ENOMEM;
7565 
7566 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
7567 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7568 	if (ret) {
7569 		dev_err(&hdev->pdev->dev,
7570 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
7571 		kfree(desc);
7572 		return ret;
7573 	}
7574 
7575 	for (i = 0; i < cmd_num; i++) {
7576 		if (i == 0) {
7577 			desc_data = (__le32 *)(&desc[i].data[0]);
7578 			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
7579 		} else {
7580 			desc_data = (__le32 *)(&desc[i]);
7581 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
7582 		}
7583 		for (k = 0; k < n; k++) {
7584 			*reg_val++ = le32_to_cpu(*desc_data++);
7585 
7586 			regs_num--;
7587 			if (!regs_num)
7588 				break;
7589 		}
7590 	}
7591 
7592 	kfree(desc);
7593 	return 0;
7594 }
7595 
7596 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7597 				 void *data)
7598 {
7599 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
7600 
7601 	struct hclge_desc *desc;
7602 	u64 *reg_val = data;
7603 	__le64 *desc_data;
7604 	int cmd_num;
7605 	int i, k, n;
7606 	int ret;
7607 
7608 	if (regs_num == 0)
7609 		return 0;
7610 
7611 	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
7612 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7613 	if (!desc)
7614 		return -ENOMEM;
7615 
7616 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
7617 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7618 	if (ret) {
7619 		dev_err(&hdev->pdev->dev,
7620 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
7621 		kfree(desc);
7622 		return ret;
7623 	}
7624 
7625 	for (i = 0; i < cmd_num; i++) {
7626 		if (i == 0) {
7627 			desc_data = (__le64 *)(&desc[i].data[0]);
7628 			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
7629 		} else {
7630 			desc_data = (__le64 *)(&desc[i]);
7631 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
7632 		}
7633 		for (k = 0; k < n; k++) {
7634 			*reg_val++ = le64_to_cpu(*desc_data++);
7635 
7636 			regs_num--;
7637 			if (!regs_num)
7638 				break;
7639 		}
7640 	}
7641 
7642 	kfree(desc);
7643 	return 0;
7644 }
7645 
7646 static int hclge_get_regs_len(struct hnae3_handle *handle)
7647 {
7648 	struct hclge_vport *vport = hclge_get_vport(handle);
7649 	struct hclge_dev *hdev = vport->back;
7650 	u32 regs_num_32_bit, regs_num_64_bit;
7651 	int ret;
7652 
7653 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
7654 	if (ret) {
7655 		dev_err(&hdev->pdev->dev,
7656 			"Get register number failed, ret = %d.\n", ret);
7657 		return -EOPNOTSUPP;
7658 	}
7659 
7660 	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
7661 }
7662 
7663 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
7664 			   void *data)
7665 {
7666 	struct hclge_vport *vport = hclge_get_vport(handle);
7667 	struct hclge_dev *hdev = vport->back;
7668 	u32 regs_num_32_bit, regs_num_64_bit;
7669 	int ret;
7670 
7671 	*version = hdev->fw_version;
7672 
7673 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
7674 	if (ret) {
7675 		dev_err(&hdev->pdev->dev,
7676 			"Get register number failed, ret = %d.\n", ret);
7677 		return;
7678 	}
7679 
7680 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
7681 	if (ret) {
7682 		dev_err(&hdev->pdev->dev,
7683 			"Get 32 bit register failed, ret = %d.\n", ret);
7684 		return;
7685 	}
7686 
7687 	data = (u32 *)data + regs_num_32_bit;
7688 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
7689 				    data);
7690 	if (ret)
7691 		dev_err(&hdev->pdev->dev,
7692 			"Get 64 bit register failed, ret = %d.\n", ret);
7693 }
7694 
7695 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
7696 {
7697 	struct hclge_set_led_state_cmd *req;
7698 	struct hclge_desc desc;
7699 	int ret;
7700 
7701 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
7702 
7703 	req = (struct hclge_set_led_state_cmd *)desc.data;
7704 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
7705 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
7706 
7707 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7708 	if (ret)
7709 		dev_err(&hdev->pdev->dev,
7710 			"Send set led state cmd error, ret =%d\n", ret);
7711 
7712 	return ret;
7713 }
7714 
7715 enum hclge_led_status {
7716 	HCLGE_LED_OFF,
7717 	HCLGE_LED_ON,
7718 	HCLGE_LED_NO_CHANGE = 0xFF,
7719 };
7720 
7721 static int hclge_set_led_id(struct hnae3_handle *handle,
7722 			    enum ethtool_phys_id_state status)
7723 {
7724 	struct hclge_vport *vport = hclge_get_vport(handle);
7725 	struct hclge_dev *hdev = vport->back;
7726 
7727 	switch (status) {
7728 	case ETHTOOL_ID_ACTIVE:
7729 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
7730 	case ETHTOOL_ID_INACTIVE:
7731 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
7732 	default:
7733 		return -EINVAL;
7734 	}
7735 }
7736 
7737 static void hclge_get_link_mode(struct hnae3_handle *handle,
7738 				unsigned long *supported,
7739 				unsigned long *advertising)
7740 {
7741 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
7742 	struct hclge_vport *vport = hclge_get_vport(handle);
7743 	struct hclge_dev *hdev = vport->back;
7744 	unsigned int idx = 0;
7745 
7746 	for (; idx < size; idx++) {
7747 		supported[idx] = hdev->hw.mac.supported[idx];
7748 		advertising[idx] = hdev->hw.mac.advertising[idx];
7749 	}
7750 }
7751 
7752 static int hclge_gro_en(struct hnae3_handle *handle, int enable)
7753 {
7754 	struct hclge_vport *vport = hclge_get_vport(handle);
7755 	struct hclge_dev *hdev = vport->back;
7756 
7757 	return hclge_config_gro(hdev, enable);
7758 }
7759 
7760 static const struct hnae3_ae_ops hclge_ops = {
7761 	.init_ae_dev = hclge_init_ae_dev,
7762 	.uninit_ae_dev = hclge_uninit_ae_dev,
7763 	.flr_prepare = hclge_flr_prepare,
7764 	.flr_done = hclge_flr_done,
7765 	.init_client_instance = hclge_init_client_instance,
7766 	.uninit_client_instance = hclge_uninit_client_instance,
7767 	.map_ring_to_vector = hclge_map_ring_to_vector,
7768 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
7769 	.get_vector = hclge_get_vector,
7770 	.put_vector = hclge_put_vector,
7771 	.set_promisc_mode = hclge_set_promisc_mode,
7772 	.set_loopback = hclge_set_loopback,
7773 	.start = hclge_ae_start,
7774 	.stop = hclge_ae_stop,
7775 	.client_start = hclge_client_start,
7776 	.client_stop = hclge_client_stop,
7777 	.get_status = hclge_get_status,
7778 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
7779 	.update_speed_duplex_h = hclge_update_speed_duplex_h,
7780 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
7781 	.get_media_type = hclge_get_media_type,
7782 	.get_rss_key_size = hclge_get_rss_key_size,
7783 	.get_rss_indir_size = hclge_get_rss_indir_size,
7784 	.get_rss = hclge_get_rss,
7785 	.set_rss = hclge_set_rss,
7786 	.set_rss_tuple = hclge_set_rss_tuple,
7787 	.get_rss_tuple = hclge_get_rss_tuple,
7788 	.get_tc_size = hclge_get_tc_size,
7789 	.get_mac_addr = hclge_get_mac_addr,
7790 	.set_mac_addr = hclge_set_mac_addr,
7791 	.do_ioctl = hclge_do_ioctl,
7792 	.add_uc_addr = hclge_add_uc_addr,
7793 	.rm_uc_addr = hclge_rm_uc_addr,
7794 	.add_mc_addr = hclge_add_mc_addr,
7795 	.rm_mc_addr = hclge_rm_mc_addr,
7796 	.set_autoneg = hclge_set_autoneg,
7797 	.get_autoneg = hclge_get_autoneg,
7798 	.get_pauseparam = hclge_get_pauseparam,
7799 	.set_pauseparam = hclge_set_pauseparam,
7800 	.set_mtu = hclge_set_mtu,
7801 	.reset_queue = hclge_reset_tqp,
7802 	.get_stats = hclge_get_stats,
7803 	.update_stats = hclge_update_stats,
7804 	.get_strings = hclge_get_strings,
7805 	.get_sset_count = hclge_get_sset_count,
7806 	.get_fw_version = hclge_get_fw_version,
7807 	.get_mdix_mode = hclge_get_mdix_mode,
7808 	.enable_vlan_filter = hclge_enable_vlan_filter,
7809 	.set_vlan_filter = hclge_set_vlan_filter,
7810 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
7811 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
7812 	.reset_event = hclge_reset_event,
7813 	.set_default_reset_request = hclge_set_def_reset_request,
7814 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
7815 	.set_channels = hclge_set_channels,
7816 	.get_channels = hclge_get_channels,
7817 	.get_regs_len = hclge_get_regs_len,
7818 	.get_regs = hclge_get_regs,
7819 	.set_led_id = hclge_set_led_id,
7820 	.get_link_mode = hclge_get_link_mode,
7821 	.add_fd_entry = hclge_add_fd_entry,
7822 	.del_fd_entry = hclge_del_fd_entry,
7823 	.del_all_fd_entries = hclge_del_all_fd_entries,
7824 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
7825 	.get_fd_rule_info = hclge_get_fd_rule_info,
7826 	.get_fd_all_rules = hclge_get_all_rules,
7827 	.restore_fd_rules = hclge_restore_fd_entries,
7828 	.enable_fd = hclge_enable_fd,
7829 	.process_hw_error = hclge_process_ras_hw_error,
7830 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
7831 	.ae_dev_resetting = hclge_ae_dev_resetting,
7832 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
7833 	.set_gro_en = hclge_gro_en,
7834 };
7835 
7836 static struct hnae3_ae_algo ae_algo = {
7837 	.ops = &hclge_ops,
7838 	.pdev_id_table = ae_algo_pci_tbl,
7839 };
7840 
7841 static int hclge_init(void)
7842 {
7843 	pr_info("%s is initializing\n", HCLGE_NAME);
7844 
7845 	hnae3_register_ae_algo(&ae_algo);
7846 
7847 	return 0;
7848 }
7849 
7850 static void hclge_exit(void)
7851 {
7852 	hnae3_unregister_ae_algo(&ae_algo);
7853 }
7854 module_init(hclge_init);
7855 module_exit(hclge_exit);
7856 
7857 MODULE_LICENSE("GPL");
7858 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
7859 MODULE_DESCRIPTION("HCLGE Driver");
7860 MODULE_VERSION(HCLGE_MOD_VERSION);
7861