1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <net/rtnetlink.h>
16 #include "hclge_cmd.h"
17 #include "hclge_dcb.h"
18 #include "hclge_main.h"
19 #include "hclge_mbx.h"
20 #include "hclge_mdio.h"
21 #include "hclge_tm.h"
22 #include "hclge_err.h"
23 #include "hnae3.h"
24 
25 #define HCLGE_NAME			"hclge"
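/* HCLGE_STATS_READ reads a u64 statistic at a byte offset inside a stats
 * structure; HCLGE_MAC_STATS_FIELD_OFF gives the byte offset of a field in
 * struct hclge_mac_stats for use with it.
 */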
26 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
27 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
28 
29 #define HCLGE_BUF_SIZE_UNIT	256
30 
31 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
32 static int hclge_init_vlan_config(struct hclge_dev *hdev);
33 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
34 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
35 			       u16 *allocated_size, bool is_alloc);
36 
37 static struct hnae3_ae_algo ae_algo;
38 
39 static const struct pci_device_id ae_algo_pci_tbl[] = {
40 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
41 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
42 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
45 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
46 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
47 	/* required last entry */
48 	{0, }
49 };
50 
51 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
52 
53 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
54 					 HCLGE_CMDQ_TX_ADDR_H_REG,
55 					 HCLGE_CMDQ_TX_DEPTH_REG,
56 					 HCLGE_CMDQ_TX_TAIL_REG,
57 					 HCLGE_CMDQ_TX_HEAD_REG,
58 					 HCLGE_CMDQ_RX_ADDR_L_REG,
59 					 HCLGE_CMDQ_RX_ADDR_H_REG,
60 					 HCLGE_CMDQ_RX_DEPTH_REG,
61 					 HCLGE_CMDQ_RX_TAIL_REG,
62 					 HCLGE_CMDQ_RX_HEAD_REG,
63 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
64 					 HCLGE_CMDQ_INTR_STS_REG,
65 					 HCLGE_CMDQ_INTR_EN_REG,
66 					 HCLGE_CMDQ_INTR_GEN_REG};
67 
68 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
69 					   HCLGE_VECTOR0_OTER_EN_REG,
70 					   HCLGE_MISC_RESET_STS_REG,
71 					   HCLGE_MISC_VECTOR_INT_STS,
72 					   HCLGE_GLOBAL_RESET_REG,
73 					   HCLGE_FUN_RST_ING,
74 					   HCLGE_GRO_EN_REG};
75 
76 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
77 					 HCLGE_RING_RX_ADDR_H_REG,
78 					 HCLGE_RING_RX_BD_NUM_REG,
79 					 HCLGE_RING_RX_BD_LENGTH_REG,
80 					 HCLGE_RING_RX_MERGE_EN_REG,
81 					 HCLGE_RING_RX_TAIL_REG,
82 					 HCLGE_RING_RX_HEAD_REG,
83 					 HCLGE_RING_RX_FBD_NUM_REG,
84 					 HCLGE_RING_RX_OFFSET_REG,
85 					 HCLGE_RING_RX_FBD_OFFSET_REG,
86 					 HCLGE_RING_RX_STASH_REG,
87 					 HCLGE_RING_RX_BD_ERR_REG,
88 					 HCLGE_RING_TX_ADDR_L_REG,
89 					 HCLGE_RING_TX_ADDR_H_REG,
90 					 HCLGE_RING_TX_BD_NUM_REG,
91 					 HCLGE_RING_TX_PRIORITY_REG,
92 					 HCLGE_RING_TX_TC_REG,
93 					 HCLGE_RING_TX_MERGE_EN_REG,
94 					 HCLGE_RING_TX_TAIL_REG,
95 					 HCLGE_RING_TX_HEAD_REG,
96 					 HCLGE_RING_TX_FBD_NUM_REG,
97 					 HCLGE_RING_TX_OFFSET_REG,
98 					 HCLGE_RING_TX_EBD_NUM_REG,
99 					 HCLGE_RING_TX_EBD_OFFSET_REG,
100 					 HCLGE_RING_TX_BD_ERR_REG,
101 					 HCLGE_RING_EN_REG};
102 
103 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
104 					     HCLGE_TQP_INTR_GL0_REG,
105 					     HCLGE_TQP_INTR_GL1_REG,
106 					     HCLGE_TQP_INTR_GL2_REG,
107 					     HCLGE_TQP_INTR_RL_REG};
108 
109 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
110 	"App    Loopback test",
111 	"Serdes serial Loopback test",
112 	"Serdes parallel Loopback test",
113 	"Phy    Loopback test"
114 };
115 
116 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
117 	{"mac_tx_mac_pause_num",
118 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
119 	{"mac_rx_mac_pause_num",
120 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
121 	{"mac_tx_control_pkt_num",
122 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
123 	{"mac_rx_control_pkt_num",
124 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
125 	{"mac_tx_pfc_pkt_num",
126 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
127 	{"mac_tx_pfc_pri0_pkt_num",
128 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
129 	{"mac_tx_pfc_pri1_pkt_num",
130 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
131 	{"mac_tx_pfc_pri2_pkt_num",
132 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
133 	{"mac_tx_pfc_pri3_pkt_num",
134 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
135 	{"mac_tx_pfc_pri4_pkt_num",
136 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
137 	{"mac_tx_pfc_pri5_pkt_num",
138 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
139 	{"mac_tx_pfc_pri6_pkt_num",
140 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
141 	{"mac_tx_pfc_pri7_pkt_num",
142 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
143 	{"mac_rx_pfc_pkt_num",
144 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
145 	{"mac_rx_pfc_pri0_pkt_num",
146 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
147 	{"mac_rx_pfc_pri1_pkt_num",
148 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
149 	{"mac_rx_pfc_pri2_pkt_num",
150 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
151 	{"mac_rx_pfc_pri3_pkt_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
153 	{"mac_rx_pfc_pri4_pkt_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
155 	{"mac_rx_pfc_pri5_pkt_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
157 	{"mac_rx_pfc_pri6_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
159 	{"mac_rx_pfc_pri7_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
161 	{"mac_tx_total_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
163 	{"mac_tx_total_oct_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
165 	{"mac_tx_good_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
167 	{"mac_tx_bad_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
169 	{"mac_tx_good_oct_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
171 	{"mac_tx_bad_oct_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
173 	{"mac_tx_uni_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
175 	{"mac_tx_multi_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
177 	{"mac_tx_broad_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
179 	{"mac_tx_undersize_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
181 	{"mac_tx_oversize_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
183 	{"mac_tx_64_oct_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
185 	{"mac_tx_65_127_oct_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
187 	{"mac_tx_128_255_oct_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
189 	{"mac_tx_256_511_oct_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
191 	{"mac_tx_512_1023_oct_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
193 	{"mac_tx_1024_1518_oct_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
195 	{"mac_tx_1519_2047_oct_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
197 	{"mac_tx_2048_4095_oct_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
199 	{"mac_tx_4096_8191_oct_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
201 	{"mac_tx_8192_9216_oct_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
203 	{"mac_tx_9217_12287_oct_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
205 	{"mac_tx_12288_16383_oct_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
207 	{"mac_tx_1519_max_good_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
209 	{"mac_tx_1519_max_bad_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
211 	{"mac_rx_total_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
213 	{"mac_rx_total_oct_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
215 	{"mac_rx_good_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
217 	{"mac_rx_bad_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
219 	{"mac_rx_good_oct_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
221 	{"mac_rx_bad_oct_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
223 	{"mac_rx_uni_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
225 	{"mac_rx_multi_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
227 	{"mac_rx_broad_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
229 	{"mac_rx_undersize_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
231 	{"mac_rx_oversize_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
233 	{"mac_rx_64_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
235 	{"mac_rx_65_127_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
237 	{"mac_rx_128_255_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
239 	{"mac_rx_256_511_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
241 	{"mac_rx_512_1023_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
243 	{"mac_rx_1024_1518_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
245 	{"mac_rx_1519_2047_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
247 	{"mac_rx_2048_4095_oct_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
249 	{"mac_rx_4096_8191_oct_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
251 	{"mac_rx_8192_9216_oct_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
253 	{"mac_rx_9217_12287_oct_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
255 	{"mac_rx_12288_16383_oct_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
257 	{"mac_rx_1519_max_good_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
259 	{"mac_rx_1519_max_bad_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
261 
262 	{"mac_tx_fragment_pkt_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
264 	{"mac_tx_undermin_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
266 	{"mac_tx_jabber_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
268 	{"mac_tx_err_all_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
270 	{"mac_tx_from_app_good_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
272 	{"mac_tx_from_app_bad_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
274 	{"mac_rx_fragment_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
276 	{"mac_rx_undermin_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
278 	{"mac_rx_jabber_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
280 	{"mac_rx_fcs_err_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
282 	{"mac_rx_send_app_good_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
284 	{"mac_rx_send_app_bad_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
286 };
287 
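/* Static MAC manager table: the address below (0x0180C200 + 0x000E) is the
 * LLDP nearest-bridge multicast address 01:80:c2:00:00:0e, matched together
 * with the LLDP ethertype.
 */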
288 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
289 	{
290 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
291 		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
292 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
293 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
294 		.i_port_bitmap = 0x1,
295 	},
296 };
297 
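/* Default 40-byte RSS hash key: this is the widely used Toeplitz sample key
 * from the Microsoft RSS specification, also found in other NIC drivers.
 */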
298 static const u8 hclge_hash_key[] = {
299 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
300 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
301 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
302 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
303 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
304 };
305 
306 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
307 {
308 #define HCLGE_MAC_CMD_NUM 21
309 
310 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
311 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
312 	__le64 *desc_data;
313 	int i, k, n;
314 	int ret;
315 
316 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
317 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
318 	if (ret) {
319 		dev_err(&hdev->pdev->dev,
320 			"Get MAC pkt stats fail, status = %d.\n", ret);
321 
322 		return ret;
323 	}
324 
325 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
326 		/* for special opcode 0032, only the first desc has the head */
327 		if (unlikely(i == 0)) {
328 			desc_data = (__le64 *)(&desc[i].data[0]);
329 			n = HCLGE_RD_FIRST_STATS_NUM;
330 		} else {
331 			desc_data = (__le64 *)(&desc[i]);
332 			n = HCLGE_RD_OTHER_STATS_NUM;
333 		}
334 
335 		for (k = 0; k < n; k++) {
336 			*data += le64_to_cpu(*desc_data);
337 			data++;
338 			desc_data++;
339 		}
340 	}
341 
342 	return 0;
343 }
344 
345 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
346 {
347 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
348 	struct hclge_desc *desc;
349 	__le64 *desc_data;
350 	u16 i, k, n;
351 	int ret;
352 
353 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
354 	if (!desc)
355 		return -ENOMEM;
356 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
357 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
358 	if (ret) {
359 		kfree(desc);
360 		return ret;
361 	}
362 
363 	for (i = 0; i < desc_num; i++) {
364 		/* for special opcode 0034, only the first desc has the head */
365 		if (i == 0) {
366 			desc_data = (__le64 *)(&desc[i].data[0]);
367 			n = HCLGE_RD_FIRST_STATS_NUM;
368 		} else {
369 			desc_data = (__le64 *)(&desc[i]);
370 			n = HCLGE_RD_OTHER_STATS_NUM;
371 		}
372 
373 		for (k = 0; k < n; k++) {
374 			*data += le64_to_cpu(*desc_data);
375 			data++;
376 			desc_data++;
377 		}
378 	}
379 
380 	kfree(desc);
381 
382 	return 0;
383 }
384 
385 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
386 {
387 	struct hclge_desc desc;
388 	__le32 *desc_data;
389 	u32 reg_num;
390 	int ret;
391 
392 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
393 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
394 	if (ret)
395 		return ret;
396 
397 	desc_data = (__le32 *)(&desc.data[0]);
398 	reg_num = le32_to_cpu(*desc_data);
399 
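	/* The expression below is 1 + DIV_ROUND_UP(reg_num - 3, 4): the first
	 * descriptor covers three register values and every following
	 * descriptor covers four.
	 */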
400 	*desc_num = 1 + ((reg_num - 3) >> 2) +
401 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
402 
403 	return 0;
404 }
405 
406 static int hclge_mac_update_stats(struct hclge_dev *hdev)
407 {
408 	u32 desc_num;
409 	int ret;
410 
411 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
412 
413 	/* The firmware supports the new statistics acquisition method */
414 	if (!ret)
415 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
416 	else if (ret == -EOPNOTSUPP)
417 		ret = hclge_mac_update_stats_defective(hdev);
418 	else
419 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
420 
421 	return ret;
422 }
423 
424 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
425 {
426 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
427 	struct hclge_vport *vport = hclge_get_vport(handle);
428 	struct hclge_dev *hdev = vport->back;
429 	struct hnae3_queue *queue;
430 	struct hclge_desc desc[1];
431 	struct hclge_tqp *tqp;
432 	int ret, i;
433 
434 	for (i = 0; i < kinfo->num_tqps; i++) {
435 		queue = handle->kinfo.tqp[i];
436 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
438 		hclge_cmd_setup_basic_desc(&desc[0],
439 					   HCLGE_OPC_QUERY_RX_STATUS,
440 					   true);
441 
442 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
443 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
444 		if (ret) {
445 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
448 			return ret;
449 		}
450 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
451 			le32_to_cpu(desc[0].data[1]);
452 	}
453 
454 	for (i = 0; i < kinfo->num_tqps; i++) {
455 		queue = handle->kinfo.tqp[i];
456 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
458 		hclge_cmd_setup_basic_desc(&desc[0],
459 					   HCLGE_OPC_QUERY_TX_STATUS,
460 					   true);
461 
462 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
463 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
464 		if (ret) {
465 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
467 				ret, i);
468 			return ret;
469 		}
470 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
471 			le32_to_cpu(desc[0].data[1]);
472 	}
473 
474 	return 0;
475 }
476 
477 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
478 {
479 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
480 	struct hclge_tqp *tqp;
481 	u64 *buff = data;
482 	int i;
483 
484 	for (i = 0; i < kinfo->num_tqps; i++) {
485 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
486 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
487 	}
488 
489 	for (i = 0; i < kinfo->num_tqps; i++) {
490 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
491 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
492 	}
493 
494 	return buff;
495 }
496 
497 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
498 {
499 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
500 
	return kinfo->num_tqps * 2;
502 }
503 
504 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
505 {
506 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
507 	u8 *buff = data;
508 	int i = 0;
509 
510 	for (i = 0; i < kinfo->num_tqps; i++) {
511 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
512 			struct hclge_tqp, q);
513 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
514 			 tqp->index);
515 		buff = buff + ETH_GSTRING_LEN;
516 	}
517 
518 	for (i = 0; i < kinfo->num_tqps; i++) {
519 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
520 			struct hclge_tqp, q);
521 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
522 			 tqp->index);
523 		buff = buff + ETH_GSTRING_LEN;
524 	}
525 
526 	return buff;
527 }
528 
529 static u64 *hclge_comm_get_stats(void *comm_stats,
530 				 const struct hclge_comm_stats_str strs[],
531 				 int size, u64 *data)
532 {
533 	u64 *buf = data;
534 	u32 i;
535 
536 	for (i = 0; i < size; i++)
537 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
538 
539 	return buf + size;
540 }
541 
542 static u8 *hclge_comm_get_strings(u32 stringset,
543 				  const struct hclge_comm_stats_str strs[],
544 				  int size, u8 *data)
545 {
546 	char *buff = (char *)data;
547 	u32 i;
548 
549 	if (stringset != ETH_SS_STATS)
550 		return buff;
551 
552 	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
555 		buff = buff + ETH_GSTRING_LEN;
556 	}
557 
558 	return (u8 *)buff;
559 }
560 
561 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
562 {
563 	struct hnae3_handle *handle;
564 	int status;
565 
566 	handle = &hdev->vport[0].nic;
567 	if (handle->client) {
568 		status = hclge_tqps_update_stats(handle);
569 		if (status) {
570 			dev_err(&hdev->pdev->dev,
571 				"Update TQPS stats fail, status = %d.\n",
572 				status);
573 		}
574 	}
575 
576 	status = hclge_mac_update_stats(hdev);
577 	if (status)
578 		dev_err(&hdev->pdev->dev,
579 			"Update MAC stats fail, status = %d.\n", status);
580 }
581 
582 static void hclge_update_stats(struct hnae3_handle *handle,
583 			       struct net_device_stats *net_stats)
584 {
585 	struct hclge_vport *vport = hclge_get_vport(handle);
586 	struct hclge_dev *hdev = vport->back;
587 	int status;
588 
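	/* HCLGE_STATE_STATISTICS_UPDATING serializes statistics updates: if
	 * another update is already in progress, skip this one rather than
	 * blocking.
	 */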
589 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
590 		return;
591 
592 	status = hclge_mac_update_stats(hdev);
593 	if (status)
594 		dev_err(&hdev->pdev->dev,
595 			"Update MAC stats fail, status = %d.\n",
596 			status);
597 
598 	status = hclge_tqps_update_stats(handle);
599 	if (status)
600 		dev_err(&hdev->pdev->dev,
601 			"Update TQPS stats fail, status = %d.\n",
602 			status);
603 
604 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
605 }
606 
607 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
608 {
609 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
610 		HNAE3_SUPPORT_PHY_LOOPBACK |\
611 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
612 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
613 
614 	struct hclge_vport *vport = hclge_get_vport(handle);
615 	struct hclge_dev *hdev = vport->back;
616 	int count = 0;
617 
	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
	 */
623 	if (stringset == ETH_SS_TEST) {
		/* clear the loopback bit flags first */
625 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
626 		if (hdev->pdev->revision >= 0x21 ||
627 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
628 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
629 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
630 			count += 1;
631 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
632 		}
633 
634 		count += 2;
635 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
636 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
637 	} else if (stringset == ETH_SS_STATS) {
638 		count = ARRAY_SIZE(g_mac_stats_string) +
639 			hclge_tqps_get_sset_count(handle, stringset);
640 	}
641 
642 	return count;
643 }
644 
645 static void hclge_get_strings(struct hnae3_handle *handle,
646 			      u32 stringset,
647 			      u8 *data)
648 {
	u8 *p = data;
650 	int size;
651 
652 	if (stringset == ETH_SS_STATS) {
653 		size = ARRAY_SIZE(g_mac_stats_string);
654 		p = hclge_comm_get_strings(stringset,
655 					   g_mac_stats_string,
656 					   size,
657 					   p);
658 		p = hclge_tqps_get_strings(handle, p);
659 	} else if (stringset == ETH_SS_TEST) {
660 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
661 			memcpy(p,
662 			       hns3_nic_test_strs[HNAE3_LOOP_APP],
663 			       ETH_GSTRING_LEN);
664 			p += ETH_GSTRING_LEN;
665 		}
666 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
667 			memcpy(p,
668 			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
669 			       ETH_GSTRING_LEN);
670 			p += ETH_GSTRING_LEN;
671 		}
672 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
673 			memcpy(p,
674 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
675 			       ETH_GSTRING_LEN);
676 			p += ETH_GSTRING_LEN;
677 		}
678 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
679 			memcpy(p,
680 			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
681 			       ETH_GSTRING_LEN);
682 			p += ETH_GSTRING_LEN;
683 		}
684 	}
685 }
686 
687 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
688 {
689 	struct hclge_vport *vport = hclge_get_vport(handle);
690 	struct hclge_dev *hdev = vport->back;
691 	u64 *p;
692 
693 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
694 				 g_mac_stats_string,
695 				 ARRAY_SIZE(g_mac_stats_string),
696 				 data);
697 	p = hclge_tqps_get_stats(handle, p);
698 }
699 
700 static int hclge_parse_func_status(struct hclge_dev *hdev,
701 				   struct hclge_func_status_cmd *status)
702 {
703 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
704 		return -EINVAL;
705 
	/* Set or clear the main PF flag */
707 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
708 		hdev->flag |= HCLGE_FLAG_MAIN;
709 	else
710 		hdev->flag &= ~HCLGE_FLAG_MAIN;
711 
712 	return 0;
713 }
714 
715 static int hclge_query_function_status(struct hclge_dev *hdev)
716 {
717 	struct hclge_func_status_cmd *req;
718 	struct hclge_desc desc;
719 	int timeout = 0;
720 	int ret;
721 
722 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
723 	req = (struct hclge_func_status_cmd *)desc.data;
724 
725 	do {
726 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
727 		if (ret) {
728 			dev_err(&hdev->pdev->dev,
729 				"query function status failed %d.\n",
730 				ret);
731 
732 			return ret;
733 		}
734 
		/* Check if the PF reset is done */
736 		if (req->pf_state)
737 			break;
738 		usleep_range(1000, 2000);
739 	} while (timeout++ < 5);
740 
741 	ret = hclge_parse_func_status(hdev, req);
742 
743 	return ret;
744 }
745 
746 static int hclge_query_pf_resource(struct hclge_dev *hdev)
747 {
748 	struct hclge_pf_res_cmd *req;
749 	struct hclge_desc desc;
750 	int ret;
751 
752 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
753 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
754 	if (ret) {
755 		dev_err(&hdev->pdev->dev,
756 			"query pf resource failed %d.\n", ret);
757 		return ret;
758 	}
759 
760 	req = (struct hclge_pf_res_cmd *)desc.data;
761 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
762 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
763 
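	/* tx_buf_size and dv_buf_size are reported by the firmware in units of
	 * (1 << HCLGE_BUF_UNIT_S) bytes; fall back to the driver defaults when
	 * the firmware reports zero, then round up to HCLGE_BUF_SIZE_UNIT.
	 */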
764 	if (req->tx_buf_size)
765 		hdev->tx_buf_size =
766 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
767 	else
768 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
769 
770 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
771 
772 	if (req->dv_buf_size)
773 		hdev->dv_buf_size =
774 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
775 	else
776 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
777 
778 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
779 
780 	if (hnae3_dev_roce_supported(hdev)) {
781 		hdev->roce_base_msix_offset =
782 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
783 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
784 		hdev->num_roce_msi =
785 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
786 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
787 
		/* The PF should have both NIC and RoCE vectors, with the NIC
		 * vectors queued before the RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
793 	} else {
794 		hdev->num_msi =
795 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
796 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
797 	}
798 
799 	return 0;
800 }
801 
802 static int hclge_parse_speed(int speed_cmd, int *speed)
803 {
804 	switch (speed_cmd) {
805 	case 6:
806 		*speed = HCLGE_MAC_SPEED_10M;
807 		break;
808 	case 7:
809 		*speed = HCLGE_MAC_SPEED_100M;
810 		break;
811 	case 0:
812 		*speed = HCLGE_MAC_SPEED_1G;
813 		break;
814 	case 1:
815 		*speed = HCLGE_MAC_SPEED_10G;
816 		break;
817 	case 2:
818 		*speed = HCLGE_MAC_SPEED_25G;
819 		break;
820 	case 3:
821 		*speed = HCLGE_MAC_SPEED_40G;
822 		break;
823 	case 4:
824 		*speed = HCLGE_MAC_SPEED_50G;
825 		break;
826 	case 5:
827 		*speed = HCLGE_MAC_SPEED_100G;
828 		break;
829 	default:
830 		return -EINVAL;
831 	}
832 
833 	return 0;
834 }
835 
836 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
837 					u8 speed_ability)
838 {
839 	unsigned long *supported = hdev->hw.mac.supported;
840 
841 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
842 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
843 				 supported);
844 
845 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
846 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
847 				 supported);
848 
849 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
850 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
851 				 supported);
852 
853 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
854 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
855 				 supported);
856 
857 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
858 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
859 				 supported);
860 
861 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
862 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
863 }
864 
865 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
866 					 u8 speed_ability)
867 {
868 	unsigned long *supported = hdev->hw.mac.supported;
869 
	/* default to supporting all speeds for a GE port */
871 	if (!speed_ability)
872 		speed_ability = HCLGE_SUPPORT_GE;
873 
874 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
875 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
876 				 supported);
877 
878 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
879 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
880 				 supported);
881 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
882 				 supported);
883 	}
884 
885 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
886 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
887 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
888 	}
889 
890 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
891 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
892 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
893 }
894 
895 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
896 {
897 	u8 media_type = hdev->hw.mac.media_type;
898 
899 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
900 		hclge_parse_fiber_link_mode(hdev, speed_ability);
901 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
902 		hclge_parse_copper_link_mode(hdev, speed_ability);
903 }
904 
905 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
906 {
907 	struct hclge_cfg_param_cmd *req;
908 	u64 mac_addr_tmp_high;
909 	u64 mac_addr_tmp;
910 	int i;
911 
912 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
913 
914 	/* get the configuration */
915 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
916 					      HCLGE_CFG_VMDQ_M,
917 					      HCLGE_CFG_VMDQ_S);
918 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
919 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
920 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
921 					    HCLGE_CFG_TQP_DESC_N_M,
922 					    HCLGE_CFG_TQP_DESC_N_S);
923 
924 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
925 					HCLGE_CFG_PHY_ADDR_M,
926 					HCLGE_CFG_PHY_ADDR_S);
927 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
928 					  HCLGE_CFG_MEDIA_TP_M,
929 					  HCLGE_CFG_MEDIA_TP_S);
930 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
931 					  HCLGE_CFG_RX_BUF_LEN_M,
932 					  HCLGE_CFG_RX_BUF_LEN_S);
933 	/* get mac_address */
934 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
935 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
936 					    HCLGE_CFG_MAC_ADDR_H_M,
937 					    HCLGE_CFG_MAC_ADDR_H_S);
938 
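	/* mac_addr_tmp holds the low 32 bits from param[2]; merge in the high
	 * bits from param[3].  The two-step shift below is equivalent to a
	 * shift by 32 on the u64 value.
	 */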
939 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
940 
941 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
942 					     HCLGE_CFG_DEFAULT_SPEED_M,
943 					     HCLGE_CFG_DEFAULT_SPEED_S);
944 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
945 					    HCLGE_CFG_RSS_SIZE_M,
946 					    HCLGE_CFG_RSS_SIZE_S);
947 
948 	for (i = 0; i < ETH_ALEN; i++)
949 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
950 
951 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
952 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
953 
954 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
955 					     HCLGE_CFG_SPEED_ABILITY_M,
956 					     HCLGE_CFG_SPEED_ABILITY_S);
957 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
958 					 HCLGE_CFG_UMV_TBL_SPACE_M,
959 					 HCLGE_CFG_UMV_TBL_SPACE_S);
960 	if (!cfg->umv_space)
961 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
962 }
963 
/* hclge_get_cfg: query the static configuration parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
968 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
969 {
970 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
971 	struct hclge_cfg_param_cmd *req;
972 	int i, ret;
973 
974 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
975 		u32 offset = 0;
976 
977 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
978 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
979 					   true);
980 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
981 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The length must be in units of 4 bytes when sent to hardware */
983 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
984 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
985 		req->offset = cpu_to_le32(offset);
986 	}
987 
988 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
989 	if (ret) {
990 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
991 		return ret;
992 	}
993 
994 	hclge_parse_cfg(hcfg, desc);
995 
996 	return 0;
997 }
998 
999 static int hclge_get_cap(struct hclge_dev *hdev)
1000 {
1001 	int ret;
1002 
1003 	ret = hclge_query_function_status(hdev);
1004 	if (ret) {
1005 		dev_err(&hdev->pdev->dev,
1006 			"query function status error %d.\n", ret);
1007 		return ret;
1008 	}
1009 
1010 	/* get pf resource */
1011 	ret = hclge_query_pf_resource(hdev);
1012 	if (ret)
1013 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1014 
1015 	return ret;
1016 }
1017 
1018 static int hclge_configure(struct hclge_dev *hdev)
1019 {
1020 	struct hclge_cfg cfg;
1021 	int ret, i;
1022 
1023 	ret = hclge_get_cfg(hdev, &cfg);
1024 	if (ret) {
1025 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1026 		return ret;
1027 	}
1028 
1029 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1030 	hdev->base_tqp_pid = 0;
1031 	hdev->rss_size_max = cfg.rss_size_max;
1032 	hdev->rx_buf_len = cfg.rx_buf_len;
1033 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1034 	hdev->hw.mac.media_type = cfg.media_type;
1035 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1036 	hdev->num_tx_desc = cfg.tqp_desc_num;
1037 	hdev->num_rx_desc = cfg.tqp_desc_num;
1038 	hdev->tm_info.num_pg = 1;
1039 	hdev->tc_max = cfg.tc_num;
1040 	hdev->tm_info.hw_pfc_map = 0;
1041 	hdev->wanted_umv_size = cfg.umv_space;
1042 
1043 	if (hnae3_dev_fd_supported(hdev))
1044 		hdev->fd_en = true;
1045 
1046 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1047 	if (ret) {
1048 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1049 		return ret;
1050 	}
1051 
1052 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1053 
1054 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1055 	    (hdev->tc_max < 1)) {
1056 		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1057 			 hdev->tc_max);
1058 		hdev->tc_max = 1;
1059 	}
1060 
1061 	/* Dev does not support DCB */
1062 	if (!hnae3_dev_dcb_supported(hdev)) {
1063 		hdev->tc_max = 1;
1064 		hdev->pfc_max = 0;
1065 	} else {
1066 		hdev->pfc_max = hdev->tc_max;
1067 	}
1068 
1069 	hdev->tm_info.num_tc = 1;
1070 
	/* Non-contiguous TCs are currently not supported */
1072 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1073 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1074 
1075 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1076 
1077 	return ret;
1078 }
1079 
1080 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1081 			    int tso_mss_max)
1082 {
1083 	struct hclge_cfg_tso_status_cmd *req;
1084 	struct hclge_desc desc;
1085 	u16 tso_mss;
1086 
1087 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1088 
1089 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1090 
1091 	tso_mss = 0;
1092 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1093 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1094 	req->tso_mss_min = cpu_to_le16(tso_mss);
1095 
1096 	tso_mss = 0;
1097 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1098 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1099 	req->tso_mss_max = cpu_to_le16(tso_mss);
1100 
1101 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1102 }
1103 
1104 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1105 {
1106 	struct hclge_cfg_gro_status_cmd *req;
1107 	struct hclge_desc desc;
1108 	int ret;
1109 
1110 	if (!hnae3_dev_gro_supported(hdev))
1111 		return 0;
1112 
1113 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1114 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1115 
1116 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1117 
1118 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1119 	if (ret)
1120 		dev_err(&hdev->pdev->dev,
1121 			"GRO hardware config cmd failed, ret = %d\n", ret);
1122 
1123 	return ret;
1124 }
1125 
1126 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1127 {
1128 	struct hclge_tqp *tqp;
1129 	int i;
1130 
1131 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1132 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1133 	if (!hdev->htqp)
1134 		return -ENOMEM;
1135 
1136 	tqp = hdev->htqp;
1137 
1138 	for (i = 0; i < hdev->num_tqps; i++) {
1139 		tqp->dev = &hdev->pdev->dev;
1140 		tqp->index = i;
1141 
1142 		tqp->q.ae_algo = &ae_algo;
1143 		tqp->q.buf_size = hdev->rx_buf_len;
1144 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1145 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1146 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1147 			i * HCLGE_TQP_REG_SIZE;
1148 
1149 		tqp++;
1150 	}
1151 
1152 	return 0;
1153 }
1154 
1155 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1156 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1157 {
1158 	struct hclge_tqp_map_cmd *req;
1159 	struct hclge_desc desc;
1160 	int ret;
1161 
1162 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1163 
1164 	req = (struct hclge_tqp_map_cmd *)desc.data;
1165 	req->tqp_id = cpu_to_le16(tqp_pid);
1166 	req->tqp_vf = func_id;
1167 	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1168 			1 << HCLGE_TQP_MAP_EN_B;
1169 	req->tqp_vid = cpu_to_le16(tqp_vid);
1170 
1171 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1172 	if (ret)
1173 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1174 
1175 	return ret;
1176 }
1177 
1178 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1179 {
1180 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1181 	struct hclge_dev *hdev = vport->back;
1182 	int i, alloced;
1183 
1184 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1185 	     alloced < num_tqps; i++) {
1186 		if (!hdev->htqp[i].alloced) {
1187 			hdev->htqp[i].q.handle = &vport->nic;
1188 			hdev->htqp[i].q.tqp_index = alloced;
1189 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1190 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1191 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1192 			hdev->htqp[i].alloced = true;
1193 			alloced++;
1194 		}
1195 	}
1196 	vport->alloc_tqps = alloced;
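	/* rss_size is the number of queues available per TC, capped by the
	 * hardware limit rss_size_max.
	 */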
1197 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1198 				vport->alloc_tqps / hdev->tm_info.num_tc);
1199 
1200 	return 0;
1201 }
1202 
1203 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
1207 	struct hnae3_handle *nic = &vport->nic;
1208 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1209 	struct hclge_dev *hdev = vport->back;
1210 	int ret;
1211 
1212 	kinfo->num_tx_desc = num_tx_desc;
1213 	kinfo->num_rx_desc = num_rx_desc;
1214 
1215 	kinfo->rx_buf_len = hdev->rx_buf_len;
1216 
1217 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1218 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1219 	if (!kinfo->tqp)
1220 		return -ENOMEM;
1221 
1222 	ret = hclge_assign_tqp(vport, num_tqps);
1223 	if (ret)
1224 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1225 
1226 	return ret;
1227 }
1228 
1229 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1230 				  struct hclge_vport *vport)
1231 {
1232 	struct hnae3_handle *nic = &vport->nic;
1233 	struct hnae3_knic_private_info *kinfo;
1234 	u16 i;
1235 
1236 	kinfo = &nic->kinfo;
1237 	for (i = 0; i < vport->alloc_tqps; i++) {
1238 		struct hclge_tqp *q =
1239 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1240 		bool is_pf;
1241 		int ret;
1242 
1243 		is_pf = !(vport->vport_id);
1244 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1245 					     i, is_pf);
1246 		if (ret)
1247 			return ret;
1248 	}
1249 
1250 	return 0;
1251 }
1252 
1253 static int hclge_map_tqp(struct hclge_dev *hdev)
1254 {
1255 	struct hclge_vport *vport = hdev->vport;
1256 	u16 i, num_vport;
1257 
1258 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1259 	for (i = 0; i < num_vport; i++)	{
1260 		int ret;
1261 
1262 		ret = hclge_map_tqp_to_vport(hdev, vport);
1263 		if (ret)
1264 			return ret;
1265 
1266 		vport++;
1267 	}
1268 
1269 	return 0;
1270 }
1271 
1272 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1273 {
1274 	/* this would be initialized later */
1275 }
1276 
1277 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1278 {
1279 	struct hnae3_handle *nic = &vport->nic;
1280 	struct hclge_dev *hdev = vport->back;
1281 	int ret;
1282 
1283 	nic->pdev = hdev->pdev;
1284 	nic->ae_algo = &ae_algo;
1285 	nic->numa_node_mask = hdev->numa_node_mask;
1286 
1287 	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1288 		ret = hclge_knic_setup(vport, num_tqps,
1289 				       hdev->num_tx_desc, hdev->num_rx_desc);
1290 
1291 		if (ret) {
1292 			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1293 				ret);
1294 			return ret;
1295 		}
1296 	} else {
1297 		hclge_unic_setup(vport, num_tqps);
1298 	}
1299 
1300 	return 0;
1301 }
1302 
1303 static int hclge_alloc_vport(struct hclge_dev *hdev)
1304 {
1305 	struct pci_dev *pdev = hdev->pdev;
1306 	struct hclge_vport *vport;
1307 	u32 tqp_main_vport;
1308 	u32 tqp_per_vport;
1309 	int num_vport, i;
1310 	int ret;
1311 
	/* We need to alloc a vport for the main NIC of the PF */
1313 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1314 
1315 	if (hdev->num_tqps < num_vport) {
1316 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1317 			hdev->num_tqps, num_vport);
1318 		return -EINVAL;
1319 	}
1320 
	/* Alloc the same number of TQPs for every vport; the main vport
	 * additionally takes the remainder.
	 */
1322 	tqp_per_vport = hdev->num_tqps / num_vport;
1323 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1324 
1325 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1326 			     GFP_KERNEL);
1327 	if (!vport)
1328 		return -ENOMEM;
1329 
1330 	hdev->vport = vport;
1331 	hdev->num_alloc_vport = num_vport;
1332 
1333 	if (IS_ENABLED(CONFIG_PCI_IOV))
1334 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1335 
1336 	for (i = 0; i < num_vport; i++) {
1337 		vport->back = hdev;
1338 		vport->vport_id = i;
1339 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1340 		INIT_LIST_HEAD(&vport->vlan_list);
1341 		INIT_LIST_HEAD(&vport->uc_mac_list);
1342 		INIT_LIST_HEAD(&vport->mc_mac_list);
1343 
1344 		if (i == 0)
1345 			ret = hclge_vport_setup(vport, tqp_main_vport);
1346 		else
1347 			ret = hclge_vport_setup(vport, tqp_per_vport);
1348 		if (ret) {
1349 			dev_err(&pdev->dev,
1350 				"vport setup failed for vport %d, %d\n",
1351 				i, ret);
1352 			return ret;
1353 		}
1354 
1355 		vport++;
1356 	}
1357 
1358 	return 0;
1359 }
1360 
1361 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1362 				    struct hclge_pkt_buf_alloc *buf_alloc)
1363 {
/* TX buffer size is in units of 128 bytes */
1365 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1366 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1367 	struct hclge_tx_buff_alloc_cmd *req;
1368 	struct hclge_desc desc;
1369 	int ret;
1370 	u8 i;
1371 
1372 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1373 
1374 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1375 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1376 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1377 
1378 		req->tx_pkt_buff[i] =
1379 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1380 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1381 	}
1382 
1383 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1384 	if (ret)
1385 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1386 			ret);
1387 
1388 	return ret;
1389 }
1390 
1391 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1392 				 struct hclge_pkt_buf_alloc *buf_alloc)
1393 {
1394 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1395 
1396 	if (ret)
1397 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1398 
1399 	return ret;
1400 }
1401 
1402 static int hclge_get_tc_num(struct hclge_dev *hdev)
1403 {
1404 	int i, cnt = 0;
1405 
1406 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1407 		if (hdev->hw_tc_map & BIT(i))
1408 			cnt++;
1409 	return cnt;
1410 }
1411 
static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
1413 {
1414 	int i, cnt = 0;
1415 
1416 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1417 		if (hdev->hw_tc_map & BIT(i) &&
1418 		    hdev->tm_info.hw_pfc_map & BIT(i))
1419 			cnt++;
1420 	return cnt;
1421 }
1422 
/* Get the number of PFC-enabled TCs that have a private buffer */
1424 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1425 				  struct hclge_pkt_buf_alloc *buf_alloc)
1426 {
1427 	struct hclge_priv_buf *priv;
1428 	int i, cnt = 0;
1429 
1430 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1431 		priv = &buf_alloc->priv_buf[i];
1432 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1433 		    priv->enable)
1434 			cnt++;
1435 	}
1436 
1437 	return cnt;
1438 }
1439 
/* Get the number of PFC-disabled TCs that have a private buffer */
1441 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1442 				     struct hclge_pkt_buf_alloc *buf_alloc)
1443 {
1444 	struct hclge_priv_buf *priv;
1445 	int i, cnt = 0;
1446 
1447 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1448 		priv = &buf_alloc->priv_buf[i];
1449 		if (hdev->hw_tc_map & BIT(i) &&
1450 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1451 		    priv->enable)
1452 			cnt++;
1453 	}
1454 
1455 	return cnt;
1456 }
1457 
1458 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1459 {
1460 	struct hclge_priv_buf *priv;
1461 	u32 rx_priv = 0;
1462 	int i;
1463 
1464 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1465 		priv = &buf_alloc->priv_buf[i];
1466 		if (priv->enable)
1467 			rx_priv += priv->buf_size;
1468 	}
1469 	return rx_priv;
1470 }
1471 
1472 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1473 {
1474 	u32 i, total_tx_size = 0;
1475 
1476 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1477 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1478 
1479 	return total_tx_size;
1480 }
1481 
1482 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1483 				struct hclge_pkt_buf_alloc *buf_alloc,
1484 				u32 rx_all)
1485 {
1486 	u32 shared_buf_min, shared_buf_tc, shared_std;
1487 	int tc_num, pfc_enable_num;
1488 	u32 shared_buf, aligned_mps;
1489 	u32 rx_priv;
1490 	int i;
1491 
1492 	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1494 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1495 
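	/* The required shared buffer (shared_std) is the larger of two
	 * estimates: a fixed minimum derived from the MPS and the DV buffer,
	 * and a per-TC estimate reserving a full MPS for each PFC-enabled TC,
	 * half an MPS for every other TC and one extra MPS, rounded up to
	 * HCLGE_BUF_SIZE_UNIT.
	 */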
1496 	if (hnae3_dev_dcb_supported(hdev))
1497 		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1498 	else
1499 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1500 					+ hdev->dv_buf_size;
1501 
1502 	shared_buf_tc = pfc_enable_num * aligned_mps +
1503 			(tc_num - pfc_enable_num) * aligned_mps / 2 +
1504 			aligned_mps;
1505 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1506 			     HCLGE_BUF_SIZE_UNIT);
1507 
1508 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1509 	if (rx_all < rx_priv + shared_std)
1510 		return false;
1511 
1512 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1513 	buf_alloc->s_buf.buf_size = shared_buf;
1514 	if (hnae3_dev_dcb_supported(hdev)) {
1515 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1516 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1517 			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1518 	} else {
1519 		buf_alloc->s_buf.self.high = aligned_mps +
1520 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1521 		buf_alloc->s_buf.self.low =
1522 			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1523 	}
1524 
1525 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1526 		if ((hdev->hw_tc_map & BIT(i)) &&
1527 		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
1528 			buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
1529 			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
1530 		} else {
1531 			buf_alloc->s_buf.tc_thrd[i].low = 0;
1532 			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
1533 		}
1534 	}
1535 
1536 	return true;
1537 }
1538 
1539 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1540 				struct hclge_pkt_buf_alloc *buf_alloc)
1541 {
1542 	u32 i, total_size;
1543 
1544 	total_size = hdev->pkt_buf_size;
1545 
	/* alloc tx buffer for all enabled TCs */
1547 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1548 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1549 
1550 		if (hdev->hw_tc_map & BIT(i)) {
1551 			if (total_size < hdev->tx_buf_size)
1552 				return -ENOMEM;
1553 
1554 			priv->tx_buf_size = hdev->tx_buf_size;
1555 		} else {
1556 			priv->tx_buf_size = 0;
1557 		}
1558 
1559 		total_size -= priv->tx_buf_size;
1560 	}
1561 
1562 	return 0;
1563 }
1564 
1565 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1566 				  struct hclge_pkt_buf_alloc *buf_alloc)
1567 {
1568 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1569 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1570 	int i;
1571 
1572 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1573 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1574 
1575 		priv->enable = 0;
1576 		priv->wl.low = 0;
1577 		priv->wl.high = 0;
1578 		priv->buf_size = 0;
1579 
1580 		if (!(hdev->hw_tc_map & BIT(i)))
1581 			continue;
1582 
1583 		priv->enable = 1;
1584 
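		/* A PFC-enabled TC gets a non-zero low waterline (one MPS, or
		 * 256 bytes in the reduced layout) and a high waterline one
		 * MPS above it; other TCs only get a high waterline of one or
		 * two MPS.
		 */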
1585 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1586 			priv->wl.low = max ? aligned_mps : 256;
1587 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1588 						HCLGE_BUF_SIZE_UNIT);
1589 		} else {
1590 			priv->wl.low = 0;
1591 			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1592 		}
1593 
1594 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1595 	}
1596 
1597 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1598 }
1599 
1600 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1601 					  struct hclge_pkt_buf_alloc *buf_alloc)
1602 {
1603 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1604 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1605 	int i;
1606 
	/* clear the private buffers starting from the last TC */
1608 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1609 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1610 
1611 		if (hdev->hw_tc_map & BIT(i) &&
1612 		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the private buffer of this non-PFC TC */
1614 			priv->wl.low = 0;
1615 			priv->wl.high = 0;
1616 			priv->buf_size = 0;
1617 			priv->enable = 0;
1618 			no_pfc_priv_num--;
1619 		}
1620 
1621 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1622 		    no_pfc_priv_num == 0)
1623 			break;
1624 	}
1625 
1626 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1627 }
1628 
1629 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1630 					struct hclge_pkt_buf_alloc *buf_alloc)
1631 {
1632 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1633 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1634 	int i;
1635 
	/* clear the private buffers starting from the last TC */
1637 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1638 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1639 
1640 		if (hdev->hw_tc_map & BIT(i) &&
1641 		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of PFC TCs with a private buffer */
1643 			priv->wl.low = 0;
1644 			priv->enable = 0;
1645 			priv->wl.high = 0;
1646 			priv->buf_size = 0;
1647 			pfc_priv_num--;
1648 		}
1649 
1650 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1651 		    pfc_priv_num == 0)
1652 			break;
1653 	}
1654 
1655 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1656 }
1657 
1658 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1659  * @hdev: pointer to struct hclge_dev
1660  * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
1662  */
1663 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1664 				struct hclge_pkt_buf_alloc *buf_alloc)
1665 {
1666 	/* When DCB is not supported, rx private buffer is not allocated. */
1667 	if (!hnae3_dev_dcb_supported(hdev)) {
1668 		u32 rx_all = hdev->pkt_buf_size;
1669 
1670 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1671 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1672 			return -ENOMEM;
1673 
1674 		return 0;
1675 	}
1676 
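	/* Fall back through progressively smaller layouts: full waterlines
	 * for every enabled TC, then reduced waterlines, then drop the
	 * private buffers of non-PFC TCs, and finally drop the PFC TCs'
	 * private buffers as well.
	 */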
1677 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1678 		return 0;
1679 
1680 	/* try to decrease the buffer size */
1681 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1682 		return 0;
1683 
1684 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1685 		return 0;
1686 
1687 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1688 		return 0;
1689 
1690 	return -ENOMEM;
1691 }
1692 
1693 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1694 				   struct hclge_pkt_buf_alloc *buf_alloc)
1695 {
1696 	struct hclge_rx_priv_buff_cmd *req;
1697 	struct hclge_desc desc;
1698 	int ret;
1699 	int i;
1700 
1701 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1702 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1703 
	/* Alloc the private buffer for each TC */
1705 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1706 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1707 
1708 		req->buf_num[i] =
1709 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1710 		req->buf_num[i] |=
1711 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1712 	}
1713 
1714 	req->shared_buf =
1715 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1716 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
1717 
1718 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1719 	if (ret)
1720 		dev_err(&hdev->pdev->dev,
1721 			"rx private buffer alloc cmd failed %d\n", ret);
1722 
1723 	return ret;
1724 }
1725 
1726 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1727 				   struct hclge_pkt_buf_alloc *buf_alloc)
1728 {
1729 	struct hclge_rx_priv_wl_buf *req;
1730 	struct hclge_priv_buf *priv;
1731 	struct hclge_desc desc[2];
1732 	int i, j;
1733 	int ret;
1734 
1735 	for (i = 0; i < 2; i++) {
1736 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1737 					   false);
1738 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1739 
		/* The first descriptor sets the NEXT bit to 1 */
1741 		if (i == 0)
1742 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1743 		else
1744 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1745 
1746 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1747 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1748 
1749 			priv = &buf_alloc->priv_buf[idx];
1750 			req->tc_wl[j].high =
1751 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1752 			req->tc_wl[j].high |=
1753 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1754 			req->tc_wl[j].low =
1755 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1756 			req->tc_wl[j].low |=
1757 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1758 		}
1759 	}
1760 
	/* Send 2 descriptors at one time */
1762 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1763 	if (ret)
1764 		dev_err(&hdev->pdev->dev,
1765 			"rx private waterline config cmd failed %d\n",
1766 			ret);
1767 	return ret;
1768 }
1769 
1770 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1771 				    struct hclge_pkt_buf_alloc *buf_alloc)
1772 {
1773 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1774 	struct hclge_rx_com_thrd *req;
1775 	struct hclge_desc desc[2];
1776 	struct hclge_tc_thrd *tc;
1777 	int i, j;
1778 	int ret;
1779 
1780 	for (i = 0; i < 2; i++) {
1781 		hclge_cmd_setup_basic_desc(&desc[i],
1782 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1783 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
1784 
		/* The first descriptor sets the NEXT bit to 1 */
1786 		if (i == 0)
1787 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1788 		else
1789 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1790 
1791 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1792 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1793 
1794 			req->com_thrd[j].high =
1795 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1796 			req->com_thrd[j].high |=
1797 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1798 			req->com_thrd[j].low =
1799 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1800 			req->com_thrd[j].low |=
1801 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1802 		}
1803 	}
1804 
1805 	/* Send 2 descriptors at one time */
1806 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1807 	if (ret)
1808 		dev_err(&hdev->pdev->dev,
1809 			"common threshold config cmd failed %d\n", ret);
1810 	return ret;
1811 }
1812 
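/* Program the overall high/low waterline of the shared RX buffer itself
 * (s_buf.self), again in hardware buffer units.
 */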
1813 static int hclge_common_wl_config(struct hclge_dev *hdev,
1814 				  struct hclge_pkt_buf_alloc *buf_alloc)
1815 {
1816 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1817 	struct hclge_rx_com_wl *req;
1818 	struct hclge_desc desc;
1819 	int ret;
1820 
1821 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1822 
1823 	req = (struct hclge_rx_com_wl *)desc.data;
1824 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1825 	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1826 
1827 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1828 	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1829 
1830 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1831 	if (ret)
1832 		dev_err(&hdev->pdev->dev,
1833 			"common waterline config cmd failed %d\n", ret);
1834 
1835 	return ret;
1836 }
1837 
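/* Calculate and program the whole packet buffer layout: TX buffers first,
 * then the RX private buffers, and, when DCB is supported, the RX private
 * waterlines and shared-buffer thresholds, followed by the shared-buffer
 * waterline.
 */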
1838 int hclge_buffer_alloc(struct hclge_dev *hdev)
1839 {
1840 	struct hclge_pkt_buf_alloc *pkt_buf;
1841 	int ret;
1842 
1843 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1844 	if (!pkt_buf)
1845 		return -ENOMEM;
1846 
1847 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1848 	if (ret) {
1849 		dev_err(&hdev->pdev->dev,
1850 			"could not calc tx buffer size for all TCs %d\n", ret);
1851 		goto out;
1852 	}
1853 
1854 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1855 	if (ret) {
1856 		dev_err(&hdev->pdev->dev,
1857 			"could not alloc tx buffers %d\n", ret);
1858 		goto out;
1859 	}
1860 
1861 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1862 	if (ret) {
1863 		dev_err(&hdev->pdev->dev,
1864 			"could not calc rx priv buffer size for all TCs %d\n",
1865 			ret);
1866 		goto out;
1867 	}
1868 
1869 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1870 	if (ret) {
1871 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1872 			ret);
1873 		goto out;
1874 	}
1875 
1876 	if (hnae3_dev_dcb_supported(hdev)) {
1877 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1878 		if (ret) {
1879 			dev_err(&hdev->pdev->dev,
1880 				"could not configure rx private waterline %d\n",
1881 				ret);
1882 			goto out;
1883 		}
1884 
1885 		ret = hclge_common_thrd_config(hdev, pkt_buf);
1886 		if (ret) {
1887 			dev_err(&hdev->pdev->dev,
1888 				"could not configure common threshold %d\n",
1889 				ret);
1890 			goto out;
1891 		}
1892 	}
1893 
1894 	ret = hclge_common_wl_config(hdev, pkt_buf);
1895 	if (ret)
1896 		dev_err(&hdev->pdev->dev,
1897 			"could not configure common waterline %d\n", ret);
1898 
1899 out:
1900 	kfree(pkt_buf);
1901 	return ret;
1902 }
1903 
1904 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1905 {
1906 	struct hnae3_handle *roce = &vport->roce;
1907 	struct hnae3_handle *nic = &vport->nic;
1908 
1909 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
1910 
1911 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1912 	    vport->back->num_msi_left == 0)
1913 		return -EINVAL;
1914 
1915 	roce->rinfo.base_vector = vport->back->roce_base_vector;
1916 
1917 	roce->rinfo.netdev = nic->kinfo.netdev;
1918 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
1919 
1920 	roce->pdev = nic->pdev;
1921 	roce->ae_algo = nic->ae_algo;
1922 	roce->numa_node_mask = nic->numa_node_mask;
1923 
1924 	return 0;
1925 }
1926 
1927 static int hclge_init_msi(struct hclge_dev *hdev)
1928 {
1929 	struct pci_dev *pdev = hdev->pdev;
1930 	int vectors;
1931 	int i;
1932 
1933 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1934 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
1935 	if (vectors < 0) {
1936 		dev_err(&pdev->dev,
1937 			"failed(%d) to allocate MSI/MSI-X vectors\n",
1938 			vectors);
1939 		return vectors;
1940 	}
1941 	if (vectors < hdev->num_msi)
1942 		dev_warn(&hdev->pdev->dev,
1943 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1944 			 hdev->num_msi, vectors);
1945 
1946 	hdev->num_msi = vectors;
1947 	hdev->num_msi_left = vectors;
1948 	hdev->base_msi_vector = pdev->irq;
1949 	hdev->roce_base_vector = hdev->base_msi_vector +
1950 				hdev->roce_base_msix_offset;
1951 
1952 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1953 					   sizeof(u16), GFP_KERNEL);
1954 	if (!hdev->vector_status) {
1955 		pci_free_irq_vectors(pdev);
1956 		return -ENOMEM;
1957 	}
1958 
1959 	for (i = 0; i < hdev->num_msi; i++)
1960 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1961 
1962 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1963 					sizeof(int), GFP_KERNEL);
1964 	if (!hdev->vector_irq) {
1965 		pci_free_irq_vectors(pdev);
1966 		return -ENOMEM;
1967 	}
1968 
1969 	return 0;
1970 }
1971 
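/* Half duplex is only valid at 10M/100M; any other speed is forced to
 * full duplex.
 */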
1972 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1973 {
1975 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1976 		duplex = HCLGE_MAC_FULL;
1977 
1978 	return duplex;
1979 }
1980 
1981 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1982 				      u8 duplex)
1983 {
1984 	struct hclge_config_mac_speed_dup_cmd *req;
1985 	struct hclge_desc desc;
1986 	int ret;
1987 
1988 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
1989 
1990 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1991 
1992 	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1993 
1994 	switch (speed) {
1995 	case HCLGE_MAC_SPEED_10M:
1996 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1997 				HCLGE_CFG_SPEED_S, 6);
1998 		break;
1999 	case HCLGE_MAC_SPEED_100M:
2000 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2001 				HCLGE_CFG_SPEED_S, 7);
2002 		break;
2003 	case HCLGE_MAC_SPEED_1G:
2004 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2005 				HCLGE_CFG_SPEED_S, 0);
2006 		break;
2007 	case HCLGE_MAC_SPEED_10G:
2008 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2009 				HCLGE_CFG_SPEED_S, 1);
2010 		break;
2011 	case HCLGE_MAC_SPEED_25G:
2012 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2013 				HCLGE_CFG_SPEED_S, 2);
2014 		break;
2015 	case HCLGE_MAC_SPEED_40G:
2016 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2017 				HCLGE_CFG_SPEED_S, 3);
2018 		break;
2019 	case HCLGE_MAC_SPEED_50G:
2020 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2021 				HCLGE_CFG_SPEED_S, 4);
2022 		break;
2023 	case HCLGE_MAC_SPEED_100G:
2024 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2025 				HCLGE_CFG_SPEED_S, 5);
2026 		break;
2027 	default:
2028 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2029 		return -EINVAL;
2030 	}
2031 
2032 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2033 		      1);
2034 
2035 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2036 	if (ret) {
2037 		dev_err(&hdev->pdev->dev,
2038 			"mac speed/duplex config cmd failed %d.\n", ret);
2039 		return ret;
2040 	}
2041 
2042 	return 0;
2043 }
2044 
2045 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2046 {
2047 	int ret;
2048 
2049 	duplex = hclge_check_speed_dup(duplex, speed);
2050 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2051 		return 0;
2052 
2053 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2054 	if (ret)
2055 		return ret;
2056 
2057 	hdev->hw.mac.speed = speed;
2058 	hdev->hw.mac.duplex = duplex;
2059 
2060 	return 0;
2061 }
2062 
2063 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2064 				     u8 duplex)
2065 {
2066 	struct hclge_vport *vport = hclge_get_vport(handle);
2067 	struct hclge_dev *hdev = vport->back;
2068 
2069 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2070 }
2071 
2072 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2073 {
2074 	struct hclge_config_auto_neg_cmd *req;
2075 	struct hclge_desc desc;
2076 	u32 flag = 0;
2077 	int ret;
2078 
2079 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2080 
2081 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2082 	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2083 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2084 
2085 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2086 	if (ret)
2087 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2088 			ret);
2089 
2090 	return ret;
2091 }
2092 
2093 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2094 {
2095 	struct hclge_vport *vport = hclge_get_vport(handle);
2096 	struct hclge_dev *hdev = vport->back;
2097 
2098 	return hclge_set_autoneg_en(hdev, enable);
2099 }
2100 
2101 static int hclge_get_autoneg(struct hnae3_handle *handle)
2102 {
2103 	struct hclge_vport *vport = hclge_get_vport(handle);
2104 	struct hclge_dev *hdev = vport->back;
2105 	struct phy_device *phydev = hdev->hw.mac.phydev;
2106 
2107 	if (phydev)
2108 		return phydev->autoneg;
2109 
2110 	return hdev->hw.mac.autoneg;
2111 }
2112 
2113 static int hclge_mac_init(struct hclge_dev *hdev)
2114 {
2115 	struct hclge_mac *mac = &hdev->hw.mac;
2116 	int ret;
2117 
2118 	hdev->support_sfp_query = true;
2119 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2120 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2121 					 hdev->hw.mac.duplex);
2122 	if (ret) {
2123 		dev_err(&hdev->pdev->dev,
2124 			"Config mac speed dup fail ret=%d\n", ret);
2125 		return ret;
2126 	}
2127 
2128 	mac->link = 0;
2129 
2130 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2131 	if (ret) {
2132 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2133 		return ret;
2134 	}
2135 
2136 	ret = hclge_buffer_alloc(hdev);
2137 	if (ret)
2138 		dev_err(&hdev->pdev->dev,
2139 			"allocate buffer fail, ret=%d\n", ret);
2140 
2141 	return ret;
2142 }
2143 
2144 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2145 {
2146 	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2147 		schedule_work(&hdev->mbx_service_task);
2148 }
2149 
2150 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2151 {
2152 	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2153 		schedule_work(&hdev->rst_service_task);
2154 }
2155 
2156 static void hclge_task_schedule(struct hclge_dev *hdev)
2157 {
2158 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2159 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2160 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2161 		(void)schedule_work(&hdev->service_task);
2162 }
2163 
2164 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2165 {
2166 	struct hclge_link_status_cmd *req;
2167 	struct hclge_desc desc;
2168 	int link_status;
2169 	int ret;
2170 
2171 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2172 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2173 	if (ret) {
2174 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2175 			ret);
2176 		return ret;
2177 	}
2178 
2179 	req = (struct hclge_link_status_cmd *)desc.data;
2180 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2181 
2182 	return !!link_status;
2183 }
2184 
2185 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2186 {
2187 	int mac_state;
2188 	int link_stat;
2189 
2190 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2191 		return 0;
2192 
2193 	mac_state = hclge_get_mac_link_status(hdev);
2194 
2195 	if (hdev->hw.mac.phydev) {
2196 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2197 			link_stat = mac_state &
2198 				hdev->hw.mac.phydev->link;
2199 		else
2200 			link_stat = 0;
2201 
2202 	} else {
2203 		link_stat = mac_state;
2204 	}
2205 
2206 	return !!link_stat;
2207 }
2208 
2209 static void hclge_update_link_status(struct hclge_dev *hdev)
2210 {
2211 	struct hnae3_client *rclient = hdev->roce_client;
2212 	struct hnae3_client *client = hdev->nic_client;
2213 	struct hnae3_handle *rhandle;
2214 	struct hnae3_handle *handle;
2215 	int state;
2216 	int i;
2217 
2218 	if (!client)
2219 		return;
2220 	state = hclge_get_mac_phy_link(hdev);
2221 	if (state != hdev->hw.mac.link) {
2222 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2223 			handle = &hdev->vport[i].nic;
2224 			client->ops->link_status_change(handle, state);
2225 			rhandle = &hdev->vport[i].roce;
2226 			if (rclient && rclient->ops->link_status_change)
2227 				rclient->ops->link_status_change(rhandle,
2228 								 state);
2229 		}
2230 		hdev->hw.mac.link = state;
2231 	}
2232 }
2233 
2234 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2235 {
2236 	struct hclge_sfp_speed_cmd *resp = NULL;
2237 	struct hclge_desc desc;
2238 	int ret;
2239 
2240 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2241 	resp = (struct hclge_sfp_speed_cmd *)desc.data;
2242 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2243 	if (ret == -EOPNOTSUPP) {
2244 		dev_warn(&hdev->pdev->dev,
2245 			 "IMP does not support getting SFP speed %d\n", ret);
2246 		return ret;
2247 	} else if (ret) {
2248 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2249 		return ret;
2250 	}
2251 
2252 	*speed = resp->sfp_speed;
2253 
2254 	return 0;
2255 }
2256 
2257 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2258 {
2259 	struct hclge_mac mac = hdev->hw.mac;
2260 	int speed;
2261 	int ret;
2262 
2263 	/* get the speed from the SFP cmd when the phy
2264 	 * doesn't exist.
2265 	 */
2266 	if (mac.phydev)
2267 		return 0;
2268 
2269 	/* if IMP does not support getting SFP/qSFP speed, return directly */
2270 	if (!hdev->support_sfp_query)
2271 		return 0;
2272 
2273 	ret = hclge_get_sfp_speed(hdev, &speed);
2274 	if (ret == -EOPNOTSUPP) {
2275 		hdev->support_sfp_query = false;
2276 		return ret;
2277 	} else if (ret) {
2278 		return ret;
2279 	}
2280 
2281 	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2282 		return 0; /* do nothing if no SFP */
2283 
2284 	/* must config full duplex for SFP */
2285 	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2286 }
2287 
2288 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2289 {
2290 	struct hclge_vport *vport = hclge_get_vport(handle);
2291 	struct hclge_dev *hdev = vport->back;
2292 
2293 	return hclge_update_speed_duplex(hdev);
2294 }
2295 
2296 static int hclge_get_status(struct hnae3_handle *handle)
2297 {
2298 	struct hclge_vport *vport = hclge_get_vport(handle);
2299 	struct hclge_dev *hdev = vport->back;
2300 
2301 	hclge_update_link_status(hdev);
2302 
2303 	return hdev->hw.mac.link;
2304 }
2305 
2306 static void hclge_service_timer(struct timer_list *t)
2307 {
2308 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2309 
2310 	mod_timer(&hdev->service_timer, jiffies + HZ);
2311 	hdev->hw_stats.stats_timer++;
2312 	hclge_task_schedule(hdev);
2313 }
2314 
2315 static void hclge_service_complete(struct hclge_dev *hdev)
2316 {
2317 	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2318 
2319 	/* Flush memory before next watchdog */
2320 	smp_mb__before_atomic();
2321 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2322 }
2323 
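/* Decode the vector0 interrupt source. Events are checked in priority
 * order: IMP/global/core reset first, then MSI-X (hardware error), then
 * mailbox (CMDQ RX); only the highest-priority pending event is returned.
 */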
2324 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2325 {
2326 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2327 
2328 	/* fetch the events from their corresponding regs */
2329 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2330 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2331 	msix_src_reg = hclge_read_dev(&hdev->hw,
2332 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2333 
2334 	/* Assumption: if reset and mailbox events are reported together,
2335 	 * only the reset event is processed in this pass and the mailbox
2336 	 * event is deferred. Since the RX CMDQ event is not cleared this
2337 	 * time, the hardware will raise another interrupt just for the
2338 	 * mailbox.
2339 	 */
2340 
2341 	/* check for vector0 reset event sources */
2342 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2343 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2344 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2345 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2346 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2347 		return HCLGE_VECTOR0_EVENT_RST;
2348 	}
2349 
2350 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2351 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2352 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2353 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2354 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2355 		return HCLGE_VECTOR0_EVENT_RST;
2356 	}
2357 
2358 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2359 		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2360 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2361 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2362 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2363 		return HCLGE_VECTOR0_EVENT_RST;
2364 	}
2365 
2366 	/* check for vector0 msix event source */
2367 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2368 		return HCLGE_VECTOR0_EVENT_ERR;
2369 
2370 	/* check for vector0 mailbox(=CMDQ RX) event source */
2371 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2372 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2373 		*clearval = cmdq_src_reg;
2374 		return HCLGE_VECTOR0_EVENT_MBX;
2375 	}
2376 
2377 	return HCLGE_VECTOR0_EVENT_OTHER;
2378 }
2379 
2380 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2381 				    u32 regclr)
2382 {
2383 	switch (event_type) {
2384 	case HCLGE_VECTOR0_EVENT_RST:
2385 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2386 		break;
2387 	case HCLGE_VECTOR0_EVENT_MBX:
2388 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2389 		break;
2390 	default:
2391 		break;
2392 	}
2393 }
2394 
2395 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2396 {
2397 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2398 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2399 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2400 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2401 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2402 }
2403 
2404 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2405 {
2406 	writel(enable ? 1 : 0, vector->addr);
2407 }
2408 
2409 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2410 {
2411 	struct hclge_dev *hdev = data;
2412 	u32 event_cause;
2413 	u32 clearval;
2414 
2415 	hclge_enable_vector(&hdev->misc_vector, false);
2416 	event_cause = hclge_check_event_cause(hdev, &clearval);
2417 
2418 	/* vector 0 interrupt is shared with reset and mailbox source events. */
2419 	switch (event_cause) {
2420 	case HCLGE_VECTOR0_EVENT_ERR:
2421 		/* we do not know what type of reset is required now. This could
2422 		 * only be decided after we fetch the type of errors which
2423 		 * caused this event. Therefore, we will do the following for now:
2424 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
2425 		 *    actual reset type to be used is deferred.
2426 		 * 2. Schedule the reset service task.
2427 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
2428 		 *    will fetch the correct type of reset by first decoding
2429 		 *    the types of errors.
2430 		 */
2431 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2432 		/* fall through */
2433 	case HCLGE_VECTOR0_EVENT_RST:
2434 		hclge_reset_task_schedule(hdev);
2435 		break;
2436 	case HCLGE_VECTOR0_EVENT_MBX:
2437 		/* If we are here then either:
2438 		 * 1. we are not handling any mbx task and we are not
2439 		 *    scheduled to handle one,
2440 		 *                        OR
2441 		 * 2. we are handling an mbx task but nothing more is
2442 		 *    scheduled.
2443 		 * In both cases we should schedule the mbx task, as this
2444 		 * interrupt reports more mbx messages.
2445 		 */
2446 		hclge_mbx_task_schedule(hdev);
2447 		break;
2448 	default:
2449 		dev_warn(&hdev->pdev->dev,
2450 			 "received unknown or unhandled event of vector0\n");
2451 		break;
2452 	}
2453 
2454 	/* clear the source of interrupt if it is not caused by reset */
2455 	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2456 		hclge_clear_event_cause(hdev, event_cause, clearval);
2457 		hclge_enable_vector(&hdev->misc_vector, true);
2458 	}
2459 
2460 	return IRQ_HANDLED;
2461 }
2462 
2463 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2464 {
2465 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2466 		dev_warn(&hdev->pdev->dev,
2467 			 "vector(vector_id %d) has been freed.\n", vector_id);
2468 		return;
2469 	}
2470 
2471 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2472 	hdev->num_msi_left += 1;
2473 	hdev->num_msi_used -= 1;
2474 }
2475 
2476 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2477 {
2478 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2479 
2480 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2481 
2482 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2483 	hdev->vector_status[0] = 0;
2484 
2485 	hdev->num_msi_left -= 1;
2486 	hdev->num_msi_used += 1;
2487 }
2488 
2489 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2490 {
2491 	int ret;
2492 
2493 	hclge_get_misc_vector(hdev);
2494 
2495 	/* this irq is freed explicitly in hclge_misc_irq_uninit() */
2496 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2497 			  0, "hclge_misc", hdev);
2498 	if (ret) {
2499 		hclge_free_vector(hdev, 0);
2500 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2501 			hdev->misc_vector.vector_irq);
2502 	}
2503 
2504 	return ret;
2505 }
2506 
2507 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2508 {
2509 	free_irq(hdev->misc_vector.vector_irq, hdev);
2510 	hclge_free_vector(hdev, 0);
2511 }
2512 
2513 int hclge_notify_client(struct hclge_dev *hdev,
2514 			enum hnae3_reset_notify_type type)
2515 {
2516 	struct hnae3_client *client = hdev->nic_client;
2517 	u16 i;
2518 
2519 	if (!client->ops->reset_notify)
2520 		return -EOPNOTSUPP;
2521 
2522 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2523 		struct hnae3_handle *handle = &hdev->vport[i].nic;
2524 		int ret;
2525 
2526 		ret = client->ops->reset_notify(handle, type);
2527 		if (ret) {
2528 			dev_err(&hdev->pdev->dev,
2529 				"notify nic client failed %d(%d)\n", type, ret);
2530 			return ret;
2531 		}
2532 	}
2533 
2534 	return 0;
2535 }
2536 
2537 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2538 				    enum hnae3_reset_notify_type type)
2539 {
2540 	struct hnae3_client *client = hdev->roce_client;
2541 	int ret = 0;
2542 	u16 i;
2543 
2544 	if (!client)
2545 		return 0;
2546 
2547 	if (!client->ops->reset_notify)
2548 		return -EOPNOTSUPP;
2549 
2550 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2551 		struct hnae3_handle *handle = &hdev->vport[i].roce;
2552 
2553 		ret = client->ops->reset_notify(handle, type);
2554 		if (ret) {
2555 			dev_err(&hdev->pdev->dev,
2556 				"notify roce client failed %d(%d)",
2557 				type, ret);
2558 			return ret;
2559 		}
2560 	}
2561 
2562 	return ret;
2563 }
2564 
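/* Wait for the hardware reset to complete by polling the relevant status
 * bit every HCLGE_RESET_WATI_MS, up to HCLGE_RESET_WAIT_CNT times (about
 * 20 seconds in total). FLR is special: it waits for the HNAE3_FLR_DONE
 * flag, which is set elsewhere in the FLR flow, instead of a register bit.
 */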
2565 static int hclge_reset_wait(struct hclge_dev *hdev)
2566 {
2567 #define HCLGE_RESET_WATI_MS	100
2568 #define HCLGE_RESET_WAIT_CNT	200
2569 	u32 val, reg, reg_bit;
2570 	u32 cnt = 0;
2571 
2572 	switch (hdev->reset_type) {
2573 	case HNAE3_IMP_RESET:
2574 		reg = HCLGE_GLOBAL_RESET_REG;
2575 		reg_bit = HCLGE_IMP_RESET_BIT;
2576 		break;
2577 	case HNAE3_GLOBAL_RESET:
2578 		reg = HCLGE_GLOBAL_RESET_REG;
2579 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
2580 		break;
2581 	case HNAE3_CORE_RESET:
2582 		reg = HCLGE_GLOBAL_RESET_REG;
2583 		reg_bit = HCLGE_CORE_RESET_BIT;
2584 		break;
2585 	case HNAE3_FUNC_RESET:
2586 		reg = HCLGE_FUN_RST_ING;
2587 		reg_bit = HCLGE_FUN_RST_ING_B;
2588 		break;
2589 	case HNAE3_FLR_RESET:
2590 		break;
2591 	default:
2592 		dev_err(&hdev->pdev->dev,
2593 			"Wait for unsupported reset type: %d\n",
2594 			hdev->reset_type);
2595 		return -EINVAL;
2596 	}
2597 
2598 	if (hdev->reset_type == HNAE3_FLR_RESET) {
2599 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2600 		       cnt++ < HCLGE_RESET_WAIT_CNT)
2601 			msleep(HCLGE_RESET_WATI_MS);
2602 
2603 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2604 			dev_err(&hdev->pdev->dev,
2605 				"flr wait timeout: %d\n", cnt);
2606 			return -EBUSY;
2607 		}
2608 
2609 		return 0;
2610 	}
2611 
2612 	val = hclge_read_dev(&hdev->hw, reg);
2613 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2614 		msleep(HCLGE_RESET_WATI_MS);
2615 		val = hclge_read_dev(&hdev->hw, reg);
2616 		cnt++;
2617 	}
2618 
2619 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
2620 		dev_warn(&hdev->pdev->dev,
2621 			 "Wait for reset timeout: %d\n", hdev->reset_type);
2622 		return -EBUSY;
2623 	}
2624 
2625 	return 0;
2626 }
2627 
2628 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2629 {
2630 	struct hclge_vf_rst_cmd *req;
2631 	struct hclge_desc desc;
2632 
2633 	req = (struct hclge_vf_rst_cmd *)desc.data;
2634 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2635 	req->dest_vfid = func_id;
2636 
2637 	if (reset)
2638 		req->vf_rst = 0x1;
2639 
2640 	return hclge_cmd_send(&hdev->hw, &desc, 1);
2641 }
2642 
2643 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2644 {
2645 	int i;
2646 
2647 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2648 		struct hclge_vport *vport = &hdev->vport[i];
2649 		int ret;
2650 
2651 		/* Send cmd to set/clear VF's FUNC_RST_ING */
2652 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2653 		if (ret) {
2654 			dev_err(&hdev->pdev->dev,
2655 				"set vf(%d) rst failed %d!\n",
2656 				vport->vport_id, ret);
2657 			return ret;
2658 		}
2659 
2660 		if (!reset)
2661 			continue;
2662 
2663 		/* Inform VF to process the reset.
2664 		 * hclge_inform_reset_assert_to_vf may fail if VF
2665 		 * driver is not loaded.
2666 		 */
2667 		ret = hclge_inform_reset_assert_to_vf(vport);
2668 		if (ret)
2669 			dev_warn(&hdev->pdev->dev,
2670 				 "inform reset to vf(%d) failed %d!\n",
2671 				 vport->vport_id, ret);
2672 	}
2673 
2674 	return 0;
2675 }
2676 
2677 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2678 {
2679 	struct hclge_desc desc;
2680 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2681 	int ret;
2682 
2683 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2684 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2685 	req->fun_reset_vfid = func_id;
2686 
2687 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2688 	if (ret)
2689 		dev_err(&hdev->pdev->dev,
2690 			"send function reset cmd fail, status =%d\n", ret);
2691 
2692 	return ret;
2693 }
2694 
2695 static void hclge_do_reset(struct hclge_dev *hdev)
2696 {
2697 	struct pci_dev *pdev = hdev->pdev;
2698 	u32 val;
2699 
2700 	switch (hdev->reset_type) {
2701 	case HNAE3_GLOBAL_RESET:
2702 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2703 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2704 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2705 		dev_info(&pdev->dev, "Global Reset requested\n");
2706 		break;
2707 	case HNAE3_CORE_RESET:
2708 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2709 		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2710 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2711 		dev_info(&pdev->dev, "Core Reset requested\n");
2712 		break;
2713 	case HNAE3_FUNC_RESET:
2714 		dev_info(&pdev->dev, "PF Reset requested\n");
2715 		/* schedule again to check later */
2716 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2717 		hclge_reset_task_schedule(hdev);
2718 		break;
2719 	case HNAE3_FLR_RESET:
2720 		dev_info(&pdev->dev, "FLR requested\n");
2721 		/* schedule again to check later */
2722 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2723 		hclge_reset_task_schedule(hdev);
2724 		break;
2725 	default:
2726 		dev_warn(&pdev->dev,
2727 			 "Unsupported reset type: %d\n", hdev->reset_type);
2728 		break;
2729 	}
2730 }
2731 
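/* Return the highest-priority reset pending in *addr, in the order
 * IMP > GLOBAL > CORE > FUNC > FLR, clearing the selected bit together
 * with the lower-level reset bits it covers. An UNKNOWN request is first
 * resolved to a concrete level via the MSI-X error handler.
 */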
2732 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2733 						   unsigned long *addr)
2734 {
2735 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2736 
2737 	/* first, resolve any unknown reset type to the known type(s) */
2738 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2739 		/* we will intentionally ignore any errors from this function
2740 		 * as we will end up in *some* reset request in any case
2741 		 */
2742 		hclge_handle_hw_msix_error(hdev, addr);
2743 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
2744 		/* We deferred the clearing of the error event which caused
2745 		 * the interrupt since it was not possible to do that in
2746 		 * interrupt context (and this is the reason we introduced
2747 		 * the new UNKNOWN reset type). Now that the errors have been
2748 		 * handled and cleared in hardware, we can safely enable
2749 		 * interrupts. This is an exception to the norm.
2750 		 */
2751 		hclge_enable_vector(&hdev->misc_vector, true);
2752 	}
2753 
2754 	/* return the highest priority reset level amongst all */
2755 	if (test_bit(HNAE3_IMP_RESET, addr)) {
2756 		rst_level = HNAE3_IMP_RESET;
2757 		clear_bit(HNAE3_IMP_RESET, addr);
2758 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2759 		clear_bit(HNAE3_CORE_RESET, addr);
2760 		clear_bit(HNAE3_FUNC_RESET, addr);
2761 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2762 		rst_level = HNAE3_GLOBAL_RESET;
2763 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2764 		clear_bit(HNAE3_CORE_RESET, addr);
2765 		clear_bit(HNAE3_FUNC_RESET, addr);
2766 	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
2767 		rst_level = HNAE3_CORE_RESET;
2768 		clear_bit(HNAE3_CORE_RESET, addr);
2769 		clear_bit(HNAE3_FUNC_RESET, addr);
2770 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2771 		rst_level = HNAE3_FUNC_RESET;
2772 		clear_bit(HNAE3_FUNC_RESET, addr);
2773 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
2774 		rst_level = HNAE3_FLR_RESET;
2775 		clear_bit(HNAE3_FLR_RESET, addr);
2776 	}
2777 
2778 	return rst_level;
2779 }
2780 
2781 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2782 {
2783 	u32 clearval = 0;
2784 
2785 	switch (hdev->reset_type) {
2786 	case HNAE3_IMP_RESET:
2787 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2788 		break;
2789 	case HNAE3_GLOBAL_RESET:
2790 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2791 		break;
2792 	case HNAE3_CORE_RESET:
2793 		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2794 		break;
2795 	default:
2796 		break;
2797 	}
2798 
2799 	if (!clearval)
2800 		return;
2801 
2802 	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2803 	hclge_enable_vector(&hdev->misc_vector, true);
2804 }
2805 
2806 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2807 {
2808 	int ret = 0;
2809 
2810 	switch (hdev->reset_type) {
2811 	case HNAE3_FUNC_RESET:
2812 		/* fall through */
2813 	case HNAE3_FLR_RESET:
2814 		ret = hclge_set_all_vf_rst(hdev, true);
2815 		break;
2816 	default:
2817 		break;
2818 	}
2819 
2820 	return ret;
2821 }
2822 
2823 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2824 {
2825 	u32 reg_val;
2826 	int ret = 0;
2827 
2828 	switch (hdev->reset_type) {
2829 	case HNAE3_FUNC_RESET:
2830 		/* There is no mechanism for the PF to know if the VF has
2831 		 * stopped IO; for now just wait 100 ms for the VF to stop IO
2832 		 */
2833 		msleep(100);
2834 		ret = hclge_func_reset_cmd(hdev, 0);
2835 		if (ret) {
2836 			dev_err(&hdev->pdev->dev,
2837 				"asserting function reset fail %d!\n", ret);
2838 			return ret;
2839 		}
2840 
2841 		/* After performing PF reset, it is not necessary to do the
2842 		 * mailbox handling or send any command to firmware, because
2843 		 * any mailbox handling or command to firmware is only valid
2844 		 * after hclge_cmd_init is called.
2845 		 */
2846 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2847 		break;
2848 	case HNAE3_FLR_RESET:
2849 		/* There is no mechanism for the PF to know if the VF has
2850 		 * stopped IO; for now just wait 100 ms for the VF to stop IO
2851 		 */
2852 		msleep(100);
2853 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2854 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2855 		break;
2856 	case HNAE3_IMP_RESET:
2857 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2858 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2859 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2860 		break;
2861 	default:
2862 		break;
2863 	}
2864 
2865 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2866 
2867 	return ret;
2868 }
2869 
2870 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2871 {
2872 #define MAX_RESET_FAIL_CNT 5
2873 #define RESET_UPGRADE_DELAY_SEC 10
2874 
2875 	if (hdev->reset_pending) {
2876 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2877 			 hdev->reset_pending);
2878 		return true;
2879 	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2880 		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2881 		    BIT(HCLGE_IMP_RESET_BIT))) {
2882 		dev_info(&hdev->pdev->dev,
2883 			 "reset failed because IMP Reset is pending\n");
2884 		hclge_clear_reset_cause(hdev);
2885 		return false;
2886 	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2887 		hdev->reset_fail_cnt++;
2888 		if (is_timeout) {
2889 			set_bit(hdev->reset_type, &hdev->reset_pending);
2890 			dev_info(&hdev->pdev->dev,
2891 				 "re-schedule to wait for hw reset done\n");
2892 			return true;
2893 		}
2894 
2895 		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2896 		hclge_clear_reset_cause(hdev);
2897 		mod_timer(&hdev->reset_timer,
2898 			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2899 
2900 		return false;
2901 	}
2902 
2903 	hclge_clear_reset_cause(hdev);
2904 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
2905 	return false;
2906 }
2907 
2908 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2909 {
2910 	int ret = 0;
2911 
2912 	switch (hdev->reset_type) {
2913 	case HNAE3_FUNC_RESET:
2914 		/* fall through */
2915 	case HNAE3_FLR_RESET:
2916 		ret = hclge_set_all_vf_rst(hdev, false);
2917 		break;
2918 	default:
2919 		break;
2920 	}
2921 
2922 	return ret;
2923 }
2924 
2925 static void hclge_reset(struct hclge_dev *hdev)
2926 {
2927 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2928 	bool is_timeout = false;
2929 	int ret;
2930 
2931 	/* Initialize ae_dev reset status as well, in case enet layer wants to
2932 	 * know if device is undergoing reset
2933 	 */
2934 	ae_dev->reset_type = hdev->reset_type;
2935 	hdev->reset_count++;
2936 	/* perform reset of the stack & ae device for a client */
2937 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2938 	if (ret)
2939 		goto err_reset;
2940 
2941 	ret = hclge_reset_prepare_down(hdev);
2942 	if (ret)
2943 		goto err_reset;
2944 
2945 	rtnl_lock();
2946 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2947 	if (ret)
2948 		goto err_reset_lock;
2949 
2950 	rtnl_unlock();
2951 
2952 	ret = hclge_reset_prepare_wait(hdev);
2953 	if (ret)
2954 		goto err_reset;
2955 
2956 	if (hclge_reset_wait(hdev)) {
2957 		is_timeout = true;
2958 		goto err_reset;
2959 	}
2960 
2961 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2962 	if (ret)
2963 		goto err_reset;
2964 
2965 	rtnl_lock();
2966 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2967 	if (ret)
2968 		goto err_reset_lock;
2969 
2970 	ret = hclge_reset_ae_dev(hdev->ae_dev);
2971 	if (ret)
2972 		goto err_reset_lock;
2973 
2974 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2975 	if (ret)
2976 		goto err_reset_lock;
2977 
2978 	ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
2979 	if (ret)
2980 		goto err_reset_lock;
2981 
2982 	hclge_clear_reset_cause(hdev);
2983 
2984 	ret = hclge_reset_prepare_up(hdev);
2985 	if (ret)
2986 		goto err_reset_lock;
2987 
2988 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2989 	if (ret)
2990 		goto err_reset_lock;
2991 
2992 	rtnl_unlock();
2993 
2994 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2995 	if (ret)
2996 		goto err_reset;
2997 
2998 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2999 	if (ret)
3000 		goto err_reset;
3001 
3002 	hdev->last_reset_time = jiffies;
3003 	hdev->reset_fail_cnt = 0;
3004 	ae_dev->reset_type = HNAE3_NONE_RESET;
3005 
3006 	return;
3007 
3008 err_reset_lock:
3009 	rtnl_unlock();
3010 err_reset:
3011 	if (hclge_reset_err_handle(hdev, is_timeout))
3012 		hclge_reset_task_schedule(hdev);
3013 }
3014 
3015 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3016 {
3017 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3018 	struct hclge_dev *hdev = ae_dev->priv;
3019 
3020 	/* We might end up getting called broadly because of two cases below:
3021 	 * 1. A recoverable error was conveyed through APEI and the only way
3022 	 *    to bring back normalcy is to reset.
3023 	 * 2. A new reset request from the stack due to timeout.
3024 	 *
3025 	 * For the first case, the error event might not have an ae handle
3026 	 * available. Check if this is a new reset request and we are not here
3027 	 * just because the last reset attempt did not succeed and the watchdog
3028 	 * hit us again. We know this if the last reset request did not occur
3029 	 * very recently (watchdog timer = 5*HZ, so check after a sufficiently
3030 	 * large time, say 4*5*HZ). For a new request we reset the "reset
3031 	 * level" to PF reset. And if it is a repeat of the most recent reset
3032 	 * request, we want to make sure we throttle it, so we will not allow
3033 	 * it again before 3*HZ has passed.
3034 	 */
3035 	if (!handle)
3036 		handle = &hdev->vport[0].nic;
3037 
3038 	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3039 		return;
3040 	else if (hdev->default_reset_request)
3041 		hdev->reset_level =
3042 			hclge_get_reset_level(hdev,
3043 					      &hdev->default_reset_request);
3044 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3045 		hdev->reset_level = HNAE3_FUNC_RESET;
3046 
3047 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3048 		 hdev->reset_level);
3049 
3050 	/* request reset & schedule reset task */
3051 	set_bit(hdev->reset_level, &hdev->reset_request);
3052 	hclge_reset_task_schedule(hdev);
3053 
3054 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3055 		hdev->reset_level++;
3056 }
3057 
3058 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3059 					enum hnae3_reset_type rst_type)
3060 {
3061 	struct hclge_dev *hdev = ae_dev->priv;
3062 
3063 	set_bit(rst_type, &hdev->default_reset_request);
3064 }
3065 
3066 static void hclge_reset_timer(struct timer_list *t)
3067 {
3068 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3069 
3070 	dev_info(&hdev->pdev->dev,
3071 		 "triggering global reset in reset timer\n");
3072 	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3073 	hclge_reset_event(hdev->pdev, NULL);
3074 }
3075 
3076 static void hclge_reset_subtask(struct hclge_dev *hdev)
3077 {
3078 	/* check if there is any ongoing reset in the hardware. This status can
3079 	 * be checked from reset_pending. If there is, then we need to wait for
3080 	 * hardware to complete the reset.
3081 	 *    a. If we are able to figure out in reasonable time that hardware
3082 	 *       has fully reset, then we can proceed with driver and client
3083 	 *       reset.
3084 	 *    b. else, we can come back later to check this status, so
3085 	 *       re-schedule now.
3086 	 */
3087 	hdev->last_reset_time = jiffies;
3088 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3089 	if (hdev->reset_type != HNAE3_NONE_RESET)
3090 		hclge_reset(hdev);
3091 
3092 	/* check if we got any *new* reset requests to be honored */
3093 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3094 	if (hdev->reset_type != HNAE3_NONE_RESET)
3095 		hclge_do_reset(hdev);
3096 
3097 	hdev->reset_type = HNAE3_NONE_RESET;
3098 }
3099 
3100 static void hclge_reset_service_task(struct work_struct *work)
3101 {
3102 	struct hclge_dev *hdev =
3103 		container_of(work, struct hclge_dev, rst_service_task);
3104 
3105 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3106 		return;
3107 
3108 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3109 
3110 	hclge_reset_subtask(hdev);
3111 
3112 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3113 }
3114 
3115 static void hclge_mailbox_service_task(struct work_struct *work)
3116 {
3117 	struct hclge_dev *hdev =
3118 		container_of(work, struct hclge_dev, mbx_service_task);
3119 
3120 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3121 		return;
3122 
3123 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3124 
3125 	hclge_mbx_handler(hdev);
3126 
3127 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3128 }
3129 
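/* A VF vport is considered dead if it has not refreshed
 * last_active_jiffies within the last 8 seconds; its MPS is then reset to
 * the default frame size.
 */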
3130 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3131 {
3132 	int i;
3133 
3134 	/* start from vport 1, since vport 0 (the PF) is always alive */
3135 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3136 		struct hclge_vport *vport = &hdev->vport[i];
3137 
3138 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3139 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3140 
3141 		/* If vf is not alive, set to default value */
3142 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3143 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3144 	}
3145 }
3146 
3147 static void hclge_service_task(struct work_struct *work)
3148 {
3149 	struct hclge_dev *hdev =
3150 		container_of(work, struct hclge_dev, service_task);
3151 
3152 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3153 		hclge_update_stats_for_all(hdev);
3154 		hdev->hw_stats.stats_timer = 0;
3155 	}
3156 
3157 	hclge_update_speed_duplex(hdev);
3158 	hclge_update_link_status(hdev);
3159 	hclge_update_vport_alive(hdev);
3160 	hclge_service_complete(hdev);
3161 }
3162 
3163 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3164 {
3165 	/* VF handle has no client */
3166 	if (!handle->client)
3167 		return container_of(handle, struct hclge_vport, nic);
3168 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3169 		return container_of(handle, struct hclge_vport, roce);
3170 	else
3171 		return container_of(handle, struct hclge_vport, nic);
3172 }
3173 
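/* Allocate up to vector_num MSI-X vectors for a vport. Index 0 is
 * reserved for the misc (vector0) interrupt, so the scan starts at index
 * 1. Returns the number of vectors actually allocated, which may be fewer
 * than requested.
 */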
3174 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3175 			    struct hnae3_vector_info *vector_info)
3176 {
3177 	struct hclge_vport *vport = hclge_get_vport(handle);
3178 	struct hnae3_vector_info *vector = vector_info;
3179 	struct hclge_dev *hdev = vport->back;
3180 	int alloc = 0;
3181 	int i, j;
3182 
3183 	vector_num = min(hdev->num_msi_left, vector_num);
3184 
3185 	for (j = 0; j < vector_num; j++) {
3186 		for (i = 1; i < hdev->num_msi; i++) {
3187 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3188 				vector->vector = pci_irq_vector(hdev->pdev, i);
3189 				vector->io_addr = hdev->hw.io_base +
3190 					HCLGE_VECTOR_REG_BASE +
3191 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3192 					vport->vport_id *
3193 					HCLGE_VECTOR_VF_OFFSET;
3194 				hdev->vector_status[i] = vport->vport_id;
3195 				hdev->vector_irq[i] = vector->vector;
3196 
3197 				vector++;
3198 				alloc++;
3199 
3200 				break;
3201 			}
3202 		}
3203 	}
3204 	hdev->num_msi_left -= alloc;
3205 	hdev->num_msi_used += alloc;
3206 
3207 	return alloc;
3208 }
3209 
3210 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3211 {
3212 	int i;
3213 
3214 	for (i = 0; i < hdev->num_msi; i++)
3215 		if (vector == hdev->vector_irq[i])
3216 			return i;
3217 
3218 	return -EINVAL;
3219 }
3220 
3221 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3222 {
3223 	struct hclge_vport *vport = hclge_get_vport(handle);
3224 	struct hclge_dev *hdev = vport->back;
3225 	int vector_id;
3226 
3227 	vector_id = hclge_get_vector_index(hdev, vector);
3228 	if (vector_id < 0) {
3229 		dev_err(&hdev->pdev->dev,
3230 			"Get vector index fail. vector_id =%d\n", vector_id);
3231 		return vector_id;
3232 	}
3233 
3234 	hclge_free_vector(hdev, vector_id);
3235 
3236 	return 0;
3237 }
3238 
3239 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3240 {
3241 	return HCLGE_RSS_KEY_SIZE;
3242 }
3243 
3244 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3245 {
3246 	return HCLGE_RSS_IND_TBL_SIZE;
3247 }
3248 
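/* Download the RSS hash algorithm and key. The key is written in three
 * chunks of HCLGE_RSS_HASH_KEY_NUM bytes; the last chunk carries whatever
 * remains of HCLGE_RSS_KEY_SIZE.
 */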
3249 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3250 				  const u8 hfunc, const u8 *key)
3251 {
3252 	struct hclge_rss_config_cmd *req;
3253 	struct hclge_desc desc;
3254 	int key_offset;
3255 	int key_size;
3256 	int ret;
3257 
3258 	req = (struct hclge_rss_config_cmd *)desc.data;
3259 
3260 	for (key_offset = 0; key_offset < 3; key_offset++) {
3261 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3262 					   false);
3263 
3264 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3265 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3266 
3267 		if (key_offset == 2)
3268 			key_size = HCLGE_RSS_KEY_SIZE -
3269 				   HCLGE_RSS_HASH_KEY_NUM * 2;
3270 		else
3271 			key_size = HCLGE_RSS_HASH_KEY_NUM;
3272 
3273 		memcpy(req->hash_key,
3274 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3275 
3276 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3277 		if (ret) {
3278 			dev_err(&hdev->pdev->dev,
3279 				"Configure RSS config fail, status = %d\n",
3280 				ret);
3281 			return ret;
3282 		}
3283 	}
3284 	return 0;
3285 }
3286 
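/* Download the RSS indirection table in HCLGE_RSS_CFG_TBL_NUM commands,
 * each covering HCLGE_RSS_CFG_TBL_SIZE consecutive entries.
 */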
3287 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3288 {
3289 	struct hclge_rss_indirection_table_cmd *req;
3290 	struct hclge_desc desc;
3291 	int i, j;
3292 	int ret;
3293 
3294 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3295 
3296 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3297 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
3298 					   false);
3299 
3300 		req->start_table_index =
3301 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3302 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3303 
3304 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3305 			req->rss_result[j] =
3306 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3307 
3308 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3309 		if (ret) {
3310 			dev_err(&hdev->pdev->dev,
3311 				"Configure rss indir table fail, status = %d\n",
3312 				ret);
3313 			return ret;
3314 		}
3315 	}
3316 	return 0;
3317 }
3318 
3319 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3320 				 u16 *tc_size, u16 *tc_offset)
3321 {
3322 	struct hclge_rss_tc_mode_cmd *req;
3323 	struct hclge_desc desc;
3324 	int ret;
3325 	int i;
3326 
3327 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3328 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3329 
3330 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3331 		u16 mode = 0;
3332 
3333 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3334 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3335 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3336 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3337 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3338 
3339 		req->rss_tc_mode[i] = cpu_to_le16(mode);
3340 	}
3341 
3342 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3343 	if (ret)
3344 		dev_err(&hdev->pdev->dev,
3345 			"Configure rss tc mode fail, status = %d\n", ret);
3346 
3347 	return ret;
3348 }
3349 
3350 static void hclge_get_rss_type(struct hclge_vport *vport)
3351 {
3352 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
3353 	    vport->rss_tuple_sets.ipv4_udp_en ||
3354 	    vport->rss_tuple_sets.ipv4_sctp_en ||
3355 	    vport->rss_tuple_sets.ipv6_tcp_en ||
3356 	    vport->rss_tuple_sets.ipv6_udp_en ||
3357 	    vport->rss_tuple_sets.ipv6_sctp_en)
3358 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3359 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3360 		 vport->rss_tuple_sets.ipv6_fragment_en)
3361 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3362 	else
3363 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3364 }
3365 
3366 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3367 {
3368 	struct hclge_rss_input_tuple_cmd *req;
3369 	struct hclge_desc desc;
3370 	int ret;
3371 
3372 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3373 
3374 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3375 
3376 	/* Get the tuple cfg from pf */
3377 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3378 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3379 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3380 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3381 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3382 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3383 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3384 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3385 	hclge_get_rss_type(&hdev->vport[0]);
3386 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3387 	if (ret)
3388 		dev_err(&hdev->pdev->dev,
3389 			"Configure rss input fail, status = %d\n", ret);
3390 	return ret;
3391 }
3392 
3393 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3394 			 u8 *key, u8 *hfunc)
3395 {
3396 	struct hclge_vport *vport = hclge_get_vport(handle);
3397 	int i;
3398 
3399 	/* Get hash algorithm */
3400 	if (hfunc) {
3401 		switch (vport->rss_algo) {
3402 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3403 			*hfunc = ETH_RSS_HASH_TOP;
3404 			break;
3405 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
3406 			*hfunc = ETH_RSS_HASH_XOR;
3407 			break;
3408 		default:
3409 			*hfunc = ETH_RSS_HASH_UNKNOWN;
3410 			break;
3411 		}
3412 	}
3413 
3414 	/* Get the RSS Key required by the user */
3415 	if (key)
3416 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3417 
3418 	/* Get the indirection table */
3419 	if (indir)
3420 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3421 			indir[i] =  vport->rss_indirection_tbl[i];
3422 
3423 	return 0;
3424 }
3425 
3426 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3427 			 const u8 *key, const u8 hfunc)
3428 {
3429 	struct hclge_vport *vport = hclge_get_vport(handle);
3430 	struct hclge_dev *hdev = vport->back;
3431 	u8 hash_algo;
3432 	int ret, i;
3433 
3434 	/* Set the RSS Hash Key if specified by the user */
3435 	if (key) {
3436 		switch (hfunc) {
3437 		case ETH_RSS_HASH_TOP:
3438 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3439 			break;
3440 		case ETH_RSS_HASH_XOR:
3441 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3442 			break;
3443 		case ETH_RSS_HASH_NO_CHANGE:
3444 			hash_algo = vport->rss_algo;
3445 			break;
3446 		default:
3447 			return -EINVAL;
3448 		}
3449 
3450 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3451 		if (ret)
3452 			return ret;
3453 
3454 		/* Update the shadow RSS key with the user specified key */
3455 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3456 		vport->rss_algo = hash_algo;
3457 	}
3458 
3459 	/* Update the shadow RSS table with user specified qids */
3460 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3461 		vport->rss_indirection_tbl[i] = indir[i];
3462 
3463 	/* Update the hardware */
3464 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3465 }
3466 
3467 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3468 {
3469 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3470 
3471 	if (nfc->data & RXH_L4_B_2_3)
3472 		hash_sets |= HCLGE_D_PORT_BIT;
3473 	else
3474 		hash_sets &= ~HCLGE_D_PORT_BIT;
3475 
3476 	if (nfc->data & RXH_IP_SRC)
3477 		hash_sets |= HCLGE_S_IP_BIT;
3478 	else
3479 		hash_sets &= ~HCLGE_S_IP_BIT;
3480 
3481 	if (nfc->data & RXH_IP_DST)
3482 		hash_sets |= HCLGE_D_IP_BIT;
3483 	else
3484 		hash_sets &= ~HCLGE_D_IP_BIT;
3485 
3486 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3487 		hash_sets |= HCLGE_V_TAG_BIT;
3488 
3489 	return hash_sets;
3490 }
3491 
3492 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3493 			       struct ethtool_rxnfc *nfc)
3494 {
3495 	struct hclge_vport *vport = hclge_get_vport(handle);
3496 	struct hclge_dev *hdev = vport->back;
3497 	struct hclge_rss_input_tuple_cmd *req;
3498 	struct hclge_desc desc;
3499 	u8 tuple_sets;
3500 	int ret;
3501 
3502 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3503 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
3504 		return -EINVAL;
3505 
3506 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3507 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3508 
3509 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3510 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3511 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3512 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3513 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3514 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3515 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3516 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3517 
3518 	tuple_sets = hclge_get_rss_hash_bits(nfc);
3519 	switch (nfc->flow_type) {
3520 	case TCP_V4_FLOW:
3521 		req->ipv4_tcp_en = tuple_sets;
3522 		break;
3523 	case TCP_V6_FLOW:
3524 		req->ipv6_tcp_en = tuple_sets;
3525 		break;
3526 	case UDP_V4_FLOW:
3527 		req->ipv4_udp_en = tuple_sets;
3528 		break;
3529 	case UDP_V6_FLOW:
3530 		req->ipv6_udp_en = tuple_sets;
3531 		break;
3532 	case SCTP_V4_FLOW:
3533 		req->ipv4_sctp_en = tuple_sets;
3534 		break;
3535 	case SCTP_V6_FLOW:
3536 		if ((nfc->data & RXH_L4_B_0_1) ||
3537 		    (nfc->data & RXH_L4_B_2_3))
3538 			return -EINVAL;
3539 
3540 		req->ipv6_sctp_en = tuple_sets;
3541 		break;
3542 	case IPV4_FLOW:
3543 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3544 		break;
3545 	case IPV6_FLOW:
3546 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3547 		break;
3548 	default:
3549 		return -EINVAL;
3550 	}
3551 
3552 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3553 	if (ret) {
3554 		dev_err(&hdev->pdev->dev,
3555 			"Set rss tuple fail, status = %d\n", ret);
3556 		return ret;
3557 	}
3558 
3559 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3560 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3561 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3562 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3563 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3564 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3565 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3566 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3567 	hclge_get_rss_type(vport);
3568 	return 0;
3569 }
3570 
3571 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3572 			       struct ethtool_rxnfc *nfc)
3573 {
3574 	struct hclge_vport *vport = hclge_get_vport(handle);
3575 	u8 tuple_sets;
3576 
3577 	nfc->data = 0;
3578 
3579 	switch (nfc->flow_type) {
3580 	case TCP_V4_FLOW:
3581 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3582 		break;
3583 	case UDP_V4_FLOW:
3584 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3585 		break;
3586 	case TCP_V6_FLOW:
3587 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3588 		break;
3589 	case UDP_V6_FLOW:
3590 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3591 		break;
3592 	case SCTP_V4_FLOW:
3593 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3594 		break;
3595 	case SCTP_V6_FLOW:
3596 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3597 		break;
3598 	case IPV4_FLOW:
3599 	case IPV6_FLOW:
3600 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3601 		break;
3602 	default:
3603 		return -EINVAL;
3604 	}
3605 
3606 	if (!tuple_sets)
3607 		return 0;
3608 
3609 	if (tuple_sets & HCLGE_D_PORT_BIT)
3610 		nfc->data |= RXH_L4_B_2_3;
3611 	if (tuple_sets & HCLGE_S_PORT_BIT)
3612 		nfc->data |= RXH_L4_B_0_1;
3613 	if (tuple_sets & HCLGE_D_IP_BIT)
3614 		nfc->data |= RXH_IP_DST;
3615 	if (tuple_sets & HCLGE_S_IP_BIT)
3616 		nfc->data |= RXH_IP_SRC;
3617 
3618 	return 0;
3619 }
3620 
3621 static int hclge_get_tc_size(struct hnae3_handle *handle)
3622 {
3623 	struct hclge_vport *vport = hclge_get_vport(handle);
3624 	struct hclge_dev *hdev = vport->back;
3625 
3626 	return hdev->rss_size_max;
3627 }
3628 
3629 int hclge_rss_init_hw(struct hclge_dev *hdev)
3630 {
3631 	struct hclge_vport *vport = hdev->vport;
3632 	u8 *rss_indir = vport[0].rss_indirection_tbl;
3633 	u16 rss_size = vport[0].alloc_rss_size;
3634 	u8 *key = vport[0].rss_hash_key;
3635 	u8 hfunc = vport[0].rss_algo;
3636 	u16 tc_offset[HCLGE_MAX_TC_NUM];
3637 	u16 tc_valid[HCLGE_MAX_TC_NUM];
3638 	u16 tc_size[HCLGE_MAX_TC_NUM];
3639 	u16 roundup_size;
3640 	int i, ret;
3641 
3642 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
3643 	if (ret)
3644 		return ret;
3645 
3646 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3647 	if (ret)
3648 		return ret;
3649 
3650 	ret = hclge_set_rss_input_tuple(hdev);
3651 	if (ret)
3652 		return ret;
3653 
	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of rss_size rounded up to a power of two; the actual queue
	 * size is limited by the indirection table.
	 */
3658 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3659 		dev_err(&hdev->pdev->dev,
3660 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
3661 			rss_size);
3662 		return -EINVAL;
3663 	}
3664 
3665 	roundup_size = roundup_pow_of_two(rss_size);
3666 	roundup_size = ilog2(roundup_size);
3667 
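	/* e.g. rss_size = 10: roundup_pow_of_two() gives 16 and ilog2() gives
	 * 4, so each valid TC is programmed with tc_size = 4 and
	 * tc_offset = rss_size * i.
	 */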
3668 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3669 		tc_valid[i] = 0;
3670 
3671 		if (!(hdev->hw_tc_map & BIT(i)))
3672 			continue;
3673 
3674 		tc_valid[i] = 1;
3675 		tc_size[i] = roundup_size;
3676 		tc_offset[i] = rss_size * i;
3677 	}
3678 
3679 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3680 }
3681 
3682 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3683 {
3684 	struct hclge_vport *vport = hdev->vport;
3685 	int i, j;
3686 
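	/* Spread the indirection table entries round-robin across each
	 * vport's allocated RSS queues, e.g. with alloc_rss_size = 4 the
	 * table becomes 0, 1, 2, 3, 0, 1, 2, 3, ...
	 */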
3687 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3688 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3689 			vport[j].rss_indirection_tbl[i] =
3690 				i % vport[j].alloc_rss_size;
3691 	}
3692 }
3693 
3694 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3695 {
3696 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3697 	struct hclge_vport *vport = hdev->vport;
3698 
3699 	if (hdev->pdev->revision >= 0x21)
3700 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3701 
3702 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3703 		vport[i].rss_tuple_sets.ipv4_tcp_en =
3704 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3705 		vport[i].rss_tuple_sets.ipv4_udp_en =
3706 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3707 		vport[i].rss_tuple_sets.ipv4_sctp_en =
3708 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3709 		vport[i].rss_tuple_sets.ipv4_fragment_en =
3710 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3711 		vport[i].rss_tuple_sets.ipv6_tcp_en =
3712 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3713 		vport[i].rss_tuple_sets.ipv6_udp_en =
3714 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3715 		vport[i].rss_tuple_sets.ipv6_sctp_en =
3716 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3717 		vport[i].rss_tuple_sets.ipv6_fragment_en =
3718 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3719 
3720 		vport[i].rss_algo = rss_algo;
3721 
3722 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
3723 		       HCLGE_RSS_KEY_SIZE);
3724 	}
3725 
3726 	hclge_rss_indir_init_cfg(hdev);
3727 }
3728 
3729 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3730 				int vector_id, bool en,
3731 				struct hnae3_ring_chain_node *ring_chain)
3732 {
3733 	struct hclge_dev *hdev = vport->back;
3734 	struct hnae3_ring_chain_node *node;
3735 	struct hclge_desc desc;
3736 	struct hclge_ctrl_vector_chain_cmd *req
3737 		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3738 	enum hclge_cmd_status status;
3739 	enum hclge_opcode_type op;
3740 	u16 tqp_type_and_id;
3741 	int i;
3742 
3743 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3744 	hclge_cmd_setup_basic_desc(&desc, op, false);
3745 	req->int_vector_id = vector_id;
3746 
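	/* Walk the ring chain and pack up to HCLGE_VECTOR_ELEMENTS_PER_CMD
	 * ring-to-vector mappings into one command descriptor; send the
	 * command whenever the descriptor fills up, and once more below for
	 * any remaining entries.
	 */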
3747 	i = 0;
3748 	for (node = ring_chain; node; node = node->next) {
3749 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3750 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
3751 				HCLGE_INT_TYPE_S,
3752 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3753 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3754 				HCLGE_TQP_ID_S, node->tqp_index);
3755 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3756 				HCLGE_INT_GL_IDX_S,
3757 				hnae3_get_field(node->int_gl_idx,
3758 						HNAE3_RING_GL_IDX_M,
3759 						HNAE3_RING_GL_IDX_S));
3760 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3761 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3762 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3763 			req->vfid = vport->vport_id;
3764 
3765 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
3766 			if (status) {
3767 				dev_err(&hdev->pdev->dev,
3768 					"Map TQP fail, status is %d.\n",
3769 					status);
3770 				return -EIO;
3771 			}
3772 			i = 0;
3773 
			hclge_cmd_setup_basic_desc(&desc, op, false);
3777 			req->int_vector_id = vector_id;
3778 		}
3779 	}
3780 
3781 	if (i > 0) {
3782 		req->int_cause_num = i;
3783 		req->vfid = vport->vport_id;
3784 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
3785 		if (status) {
3786 			dev_err(&hdev->pdev->dev,
3787 				"Map TQP fail, status is %d.\n", status);
3788 			return -EIO;
3789 		}
3790 	}
3791 
3792 	return 0;
3793 }
3794 
3795 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3796 				    int vector,
3797 				    struct hnae3_ring_chain_node *ring_chain)
3798 {
3799 	struct hclge_vport *vport = hclge_get_vport(handle);
3800 	struct hclge_dev *hdev = vport->back;
3801 	int vector_id;
3802 
3803 	vector_id = hclge_get_vector_index(hdev, vector);
3804 	if (vector_id < 0) {
3805 		dev_err(&hdev->pdev->dev,
3806 			"Get vector index fail. vector_id =%d\n", vector_id);
3807 		return vector_id;
3808 	}
3809 
3810 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3811 }
3812 
3813 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3814 				       int vector,
3815 				       struct hnae3_ring_chain_node *ring_chain)
3816 {
3817 	struct hclge_vport *vport = hclge_get_vport(handle);
3818 	struct hclge_dev *hdev = vport->back;
3819 	int vector_id, ret;
3820 
3821 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3822 		return 0;
3823 
3824 	vector_id = hclge_get_vector_index(hdev, vector);
3825 	if (vector_id < 0) {
3826 		dev_err(&handle->pdev->dev,
3827 			"Get vector index fail. ret =%d\n", vector_id);
3828 		return vector_id;
3829 	}
3830 
3831 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3832 	if (ret)
3833 		dev_err(&handle->pdev->dev,
3834 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3835 			vector_id,
3836 			ret);
3837 
3838 	return ret;
3839 }
3840 
3841 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3842 			       struct hclge_promisc_param *param)
3843 {
3844 	struct hclge_promisc_cfg_cmd *req;
3845 	struct hclge_desc desc;
3846 	int ret;
3847 
3848 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3849 
3850 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
3851 	req->vf_id = param->vf_id;
3852 
	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
	 * pdev revision(0x20); newer revisions support them. Setting these
	 * two fields does not cause the firmware to return an error on
	 * revision(0x20).
	 */
3858 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3859 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3860 
3861 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3862 	if (ret)
3863 		dev_err(&hdev->pdev->dev,
3864 			"Set promisc mode fail, status is %d.\n", ret);
3865 
3866 	return ret;
3867 }
3868 
3869 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3870 			      bool en_mc, bool en_bc, int vport_id)
3871 {
3872 	if (!param)
3873 		return;
3874 
3875 	memset(param, 0, sizeof(struct hclge_promisc_param));
3876 	if (en_uc)
3877 		param->enable = HCLGE_PROMISC_EN_UC;
3878 	if (en_mc)
3879 		param->enable |= HCLGE_PROMISC_EN_MC;
3880 	if (en_bc)
3881 		param->enable |= HCLGE_PROMISC_EN_BC;
3882 	param->vf_id = vport_id;
3883 }
3884 
3885 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3886 				  bool en_mc_pmc)
3887 {
3888 	struct hclge_vport *vport = hclge_get_vport(handle);
3889 	struct hclge_dev *hdev = vport->back;
3890 	struct hclge_promisc_param param;
3891 	bool en_bc_pmc = true;
3892 
	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode.
	 */
3897 	if (handle->pdev->revision == 0x20)
3898 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3899 
3900 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3901 				 vport->vport_id);
3902 	return hclge_cmd_set_promisc_mode(hdev, &param);
3903 }
3904 
3905 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3906 {
3907 	struct hclge_get_fd_mode_cmd *req;
3908 	struct hclge_desc desc;
3909 	int ret;
3910 
3911 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3912 
3913 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
3914 
3915 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3916 	if (ret) {
3917 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3918 		return ret;
3919 	}
3920 
3921 	*fd_mode = req->mode;
3922 
3923 	return ret;
3924 }
3925 
3926 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3927 				   u32 *stage1_entry_num,
3928 				   u32 *stage2_entry_num,
3929 				   u16 *stage1_counter_num,
3930 				   u16 *stage2_counter_num)
3931 {
3932 	struct hclge_get_fd_allocation_cmd *req;
3933 	struct hclge_desc desc;
3934 	int ret;
3935 
3936 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3937 
3938 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3939 
3940 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3941 	if (ret) {
3942 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3943 			ret);
3944 		return ret;
3945 	}
3946 
3947 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3948 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3949 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3950 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3951 
3952 	return ret;
3953 }
3954 
3955 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3956 {
3957 	struct hclge_set_fd_key_config_cmd *req;
3958 	struct hclge_fd_key_cfg *stage;
3959 	struct hclge_desc desc;
3960 	int ret;
3961 
3962 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3963 
3964 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3965 	stage = &hdev->fd_cfg.key_cfg[stage_num];
3966 	req->stage = stage_num;
3967 	req->key_select = stage->key_sel;
3968 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3969 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3970 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3971 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3972 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3973 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3974 
3975 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3976 	if (ret)
3977 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3978 
3979 	return ret;
3980 }
3981 
3982 static int hclge_init_fd_config(struct hclge_dev *hdev)
3983 {
3984 #define LOW_2_WORDS		0x03
3985 	struct hclge_fd_key_cfg *key_cfg;
3986 	int ret;
3987 
3988 	if (!hnae3_dev_fd_supported(hdev))
3989 		return 0;
3990 
3991 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3992 	if (ret)
3993 		return ret;
3994 
3995 	switch (hdev->fd_cfg.fd_mode) {
3996 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3997 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3998 		break;
3999 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4000 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4001 		break;
4002 	default:
4003 		dev_err(&hdev->pdev->dev,
4004 			"Unsupported flow director mode %d\n",
4005 			hdev->fd_cfg.fd_mode);
4006 		return -EOPNOTSUPP;
4007 	}
4008 
4009 	hdev->fd_cfg.proto_support =
4010 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4011 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4012 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4014 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4015 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4016 	key_cfg->outer_sipv6_word_en = 0;
4017 	key_cfg->outer_dipv6_word_en = 0;
4018 
4019 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4020 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4021 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4022 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4023 
	/* With the max 400-bit key, ether type tuples are also supported */
4025 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4026 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4027 		key_cfg->tuple_active |=
4028 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4029 	}
4030 
4031 	/* roce_type is used to filter roce frames
4032 	 * dst_vport is used to specify the rule
4033 	 */
4034 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4035 
4036 	ret = hclge_get_fd_allocation(hdev,
4037 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4038 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4039 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4040 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4041 	if (ret)
4042 		return ret;
4043 
4044 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4045 }
4046 
4047 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4048 				int loc, u8 *key, bool is_add)
4049 {
4050 	struct hclge_fd_tcam_config_1_cmd *req1;
4051 	struct hclge_fd_tcam_config_2_cmd *req2;
4052 	struct hclge_fd_tcam_config_3_cmd *req3;
4053 	struct hclge_desc desc[3];
4054 	int ret;
4055 
4056 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4057 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4058 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4059 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4060 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4061 
4062 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4063 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4064 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4065 
4066 	req1->stage = stage;
4067 	req1->xy_sel = sel_x ? 1 : 0;
4068 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4069 	req1->index = cpu_to_le32(loc);
4070 	req1->entry_vld = sel_x ? is_add : 0;
4071 
4072 	if (key) {
4073 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4074 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4075 		       sizeof(req2->tcam_data));
4076 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4077 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4078 	}
4079 
4080 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4081 	if (ret)
4082 		dev_err(&hdev->pdev->dev,
4083 			"config tcam key fail, ret=%d\n",
4084 			ret);
4085 
4086 	return ret;
4087 }
4088 
4089 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4090 			      struct hclge_fd_ad_data *action)
4091 {
4092 	struct hclge_fd_ad_config_cmd *req;
4093 	struct hclge_desc desc;
4094 	u64 ad_data = 0;
4095 	int ret;
4096 
4097 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4098 
4099 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4100 	req->index = cpu_to_le32(loc);
4101 	req->stage = stage;
4102 
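	/* The 64-bit action data is built in two halves: the rule-id
	 * write-back fields are set first and then shifted into the upper
	 * 32 bits, after which the drop/forward, queue, counter and
	 * next-stage fields are placed in the lower 32 bits.
	 */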
4103 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4104 		      action->write_rule_id_to_bd);
4105 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4106 			action->rule_id);
4107 	ad_data <<= 32;
4108 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4109 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4110 		      action->forward_to_direct_queue);
4111 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4112 			action->queue_id);
4113 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4114 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4115 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4116 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4117 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4118 			action->counter_id);
4119 
4120 	req->ad_data = cpu_to_le64(ad_data);
4121 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4122 	if (ret)
4123 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4124 
4125 	return ret;
4126 }
4127 
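/* Convert one tuple of the rule into the TCAM key pair. The calc_x()/calc_y()
 * helpers (defined earlier in this file) derive the x/y representation from
 * the tuple value and its mask, so that masked-out bits can act as don't-care
 * bits in the TCAM entry. Returns true when the tuple occupies space in the
 * key (written from the rule, or left zeroed when the rule marks it unused),
 * and false when the tuple is not handled here.
 */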
4128 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4129 				   struct hclge_fd_rule *rule)
4130 {
4131 	u16 tmp_x_s, tmp_y_s;
4132 	u32 tmp_x_l, tmp_y_l;
4133 	int i;
4134 
4135 	if (rule->unused_tuple & tuple_bit)
4136 		return true;
4137 
4138 	switch (tuple_bit) {
4139 	case 0:
4140 		return false;
4141 	case BIT(INNER_DST_MAC):
4142 		for (i = 0; i < 6; i++) {
4143 			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4144 			       rule->tuples_mask.dst_mac[i]);
4145 			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4146 			       rule->tuples_mask.dst_mac[i]);
4147 		}
4148 
4149 		return true;
4150 	case BIT(INNER_SRC_MAC):
4151 		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
4156 		}
4157 
4158 		return true;
4159 	case BIT(INNER_VLAN_TAG_FST):
4160 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4161 		       rule->tuples_mask.vlan_tag1);
4162 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4163 		       rule->tuples_mask.vlan_tag1);
4164 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4165 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4166 
4167 		return true;
4168 	case BIT(INNER_ETH_TYPE):
4169 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4170 		       rule->tuples_mask.ether_proto);
4171 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4172 		       rule->tuples_mask.ether_proto);
4173 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4174 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4175 
4176 		return true;
4177 	case BIT(INNER_IP_TOS):
4178 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4179 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4180 
4181 		return true;
4182 	case BIT(INNER_IP_PROTO):
4183 		calc_x(*key_x, rule->tuples.ip_proto,
4184 		       rule->tuples_mask.ip_proto);
4185 		calc_y(*key_y, rule->tuples.ip_proto,
4186 		       rule->tuples_mask.ip_proto);
4187 
4188 		return true;
4189 	case BIT(INNER_SRC_IP):
4190 		calc_x(tmp_x_l, rule->tuples.src_ip[3],
4191 		       rule->tuples_mask.src_ip[3]);
4192 		calc_y(tmp_y_l, rule->tuples.src_ip[3],
4193 		       rule->tuples_mask.src_ip[3]);
4194 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4195 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4196 
4197 		return true;
4198 	case BIT(INNER_DST_IP):
4199 		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4200 		       rule->tuples_mask.dst_ip[3]);
4201 		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4202 		       rule->tuples_mask.dst_ip[3]);
4203 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4204 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4205 
4206 		return true;
4207 	case BIT(INNER_SRC_PORT):
4208 		calc_x(tmp_x_s, rule->tuples.src_port,
4209 		       rule->tuples_mask.src_port);
4210 		calc_y(tmp_y_s, rule->tuples.src_port,
4211 		       rule->tuples_mask.src_port);
4212 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4213 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4214 
4215 		return true;
4216 	case BIT(INNER_DST_PORT):
4217 		calc_x(tmp_x_s, rule->tuples.dst_port,
4218 		       rule->tuples_mask.dst_port);
4219 		calc_y(tmp_y_s, rule->tuples.dst_port,
4220 		       rule->tuples_mask.dst_port);
4221 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4222 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4223 
4224 		return true;
4225 	default:
4226 		return false;
4227 	}
4228 }
4229 
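/* Compose the port number used in the flow director meta data: for a host
 * port it encodes the PF id and VF id plus the port type bit; for a network
 * port it encodes the network port id plus the port type bit.
 */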
4230 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4231 				 u8 vf_id, u8 network_port_id)
4232 {
4233 	u32 port_number = 0;
4234 
4235 	if (port_type == HOST_PORT) {
4236 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4237 				pf_id);
4238 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4239 				vf_id);
4240 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4241 	} else {
4242 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4243 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4244 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4245 	}
4246 
4247 	return port_number;
4248 }
4249 
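/* Build the meta data portion of the key: walk the active meta data fields
 * (packet type and destination vport here), pack them from bit 0 upwards,
 * then shift the packed value so it occupies the most significant bits of
 * the 32-bit meta data word before converting it to the x/y pair.
 */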
4250 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4251 				       __le32 *key_x, __le32 *key_y,
4252 				       struct hclge_fd_rule *rule)
4253 {
4254 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4255 	u8 cur_pos = 0, tuple_size, shift_bits;
4256 	int i;
4257 
4258 	for (i = 0; i < MAX_META_DATA; i++) {
4259 		tuple_size = meta_data_key_info[i].key_length;
4260 		tuple_bit = key_cfg->meta_data_active & BIT(i);
4261 
4262 		switch (tuple_bit) {
4263 		case BIT(ROCE_TYPE):
4264 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4265 			cur_pos += tuple_size;
4266 			break;
4267 		case BIT(DST_VPORT):
4268 			port_number = hclge_get_port_number(HOST_PORT, 0,
4269 							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size - 1,
						cur_pos),
					cur_pos, port_number);
4273 			cur_pos += tuple_size;
4274 			break;
4275 		default:
4276 			break;
4277 		}
4278 	}
4279 
4280 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4281 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4282 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
4283 
4284 	*key_x = cpu_to_le32(tmp_x << shift_bits);
4285 	*key_y = cpu_to_le32(tmp_y << shift_bits);
4286 }
4287 
/* A complete key consists of a meta data key and a tuple key.
 * The meta data key is stored in the MSB region, the tuple key is stored
 * in the LSB region, and unused bits are filled with 0.
 */
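/* The meta data x/y words are written at byte offset
 * max_key_length / 8 - MAX_META_DATA_LENGTH / 8 of the key buffers, while
 * the tuple key is packed from the start of the buffers.
 */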
4292 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4293 			    struct hclge_fd_rule *rule)
4294 {
4295 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4296 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4297 	u8 *cur_key_x, *cur_key_y;
4298 	int i, ret, tuple_size;
4299 	u8 meta_data_region;
4300 
4301 	memset(key_x, 0, sizeof(key_x));
4302 	memset(key_y, 0, sizeof(key_y));
4303 	cur_key_x = key_x;
4304 	cur_key_y = key_y;
4305 
	for (i = 0; i < MAX_TUPLE; i++) {
4307 		bool tuple_valid;
4308 		u32 check_tuple;
4309 
4310 		tuple_size = tuple_key_info[i].key_length / 8;
4311 		check_tuple = key_cfg->tuple_active & BIT(i);
4312 
4313 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4314 						     cur_key_y, rule);
4315 		if (tuple_valid) {
4316 			cur_key_x += tuple_size;
4317 			cur_key_y += tuple_size;
4318 		}
4319 	}
4320 
4321 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4322 			MAX_META_DATA_LENGTH / 8;
4323 
4324 	hclge_fd_convert_meta_data(key_cfg,
4325 				   (__le32 *)(key_x + meta_data_region),
4326 				   (__le32 *)(key_y + meta_data_region),
4327 				   rule);
4328 
4329 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4330 				   true);
4331 	if (ret) {
4332 		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
4335 		return ret;
4336 	}
4337 
4338 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4339 				   true);
4340 	if (ret)
4341 		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);
4344 	return ret;
4345 }
4346 
4347 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4348 			       struct hclge_fd_rule *rule)
4349 {
4350 	struct hclge_fd_ad_data ad_data;
4351 
4352 	ad_data.ad_id = rule->location;
4353 
4354 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4355 		ad_data.drop_packet = true;
4356 		ad_data.forward_to_direct_queue = false;
4357 		ad_data.queue_id = 0;
4358 	} else {
4359 		ad_data.drop_packet = false;
4360 		ad_data.forward_to_direct_queue = true;
4361 		ad_data.queue_id = rule->queue_id;
4362 	}
4363 
4364 	ad_data.use_counter = false;
4365 	ad_data.counter_id = 0;
4366 
4367 	ad_data.use_next_stage = false;
4368 	ad_data.next_input_key = 0;
4369 
4370 	ad_data.write_rule_id_to_bd = true;
4371 	ad_data.rule_id = rule->location;
4372 
4373 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4374 }
4375 
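/* Validate an ethtool flow spec against the flow director configuration and
 * collect the tuples that are left wildcarded by the spec in *unused, so
 * that key generation can skip them later.
 */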
4376 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4377 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
4378 {
4379 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
4380 	struct ethtool_usrip4_spec *usr_ip4_spec;
4381 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
4382 	struct ethtool_usrip6_spec *usr_ip6_spec;
4383 	struct ethhdr *ether_spec;
4384 
4385 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4386 		return -EINVAL;
4387 
4388 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4389 		return -EOPNOTSUPP;
4390 
4391 	if ((fs->flow_type & FLOW_EXT) &&
4392 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4393 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4394 		return -EOPNOTSUPP;
4395 	}
4396 
4397 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4398 	case SCTP_V4_FLOW:
4399 	case TCP_V4_FLOW:
4400 	case UDP_V4_FLOW:
4401 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4402 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4403 
4404 		if (!tcp_ip4_spec->ip4src)
4405 			*unused |= BIT(INNER_SRC_IP);
4406 
4407 		if (!tcp_ip4_spec->ip4dst)
4408 			*unused |= BIT(INNER_DST_IP);
4409 
4410 		if (!tcp_ip4_spec->psrc)
4411 			*unused |= BIT(INNER_SRC_PORT);
4412 
4413 		if (!tcp_ip4_spec->pdst)
4414 			*unused |= BIT(INNER_DST_PORT);
4415 
4416 		if (!tcp_ip4_spec->tos)
4417 			*unused |= BIT(INNER_IP_TOS);
4418 
4419 		break;
4420 	case IP_USER_FLOW:
4421 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4422 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4423 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4424 
4425 		if (!usr_ip4_spec->ip4src)
4426 			*unused |= BIT(INNER_SRC_IP);
4427 
4428 		if (!usr_ip4_spec->ip4dst)
4429 			*unused |= BIT(INNER_DST_IP);
4430 
4431 		if (!usr_ip4_spec->tos)
4432 			*unused |= BIT(INNER_IP_TOS);
4433 
4434 		if (!usr_ip4_spec->proto)
4435 			*unused |= BIT(INNER_IP_PROTO);
4436 
4437 		if (usr_ip4_spec->l4_4_bytes)
4438 			return -EOPNOTSUPP;
4439 
4440 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4441 			return -EOPNOTSUPP;
4442 
4443 		break;
4444 	case SCTP_V6_FLOW:
4445 	case TCP_V6_FLOW:
4446 	case UDP_V6_FLOW:
4447 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4448 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4449 			BIT(INNER_IP_TOS);
4450 
4451 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4452 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4453 			*unused |= BIT(INNER_SRC_IP);
4454 
4455 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4456 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4457 			*unused |= BIT(INNER_DST_IP);
4458 
4459 		if (!tcp_ip6_spec->psrc)
4460 			*unused |= BIT(INNER_SRC_PORT);
4461 
4462 		if (!tcp_ip6_spec->pdst)
4463 			*unused |= BIT(INNER_DST_PORT);
4464 
4465 		if (tcp_ip6_spec->tclass)
4466 			return -EOPNOTSUPP;
4467 
4468 		break;
4469 	case IPV6_USER_FLOW:
4470 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4471 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4472 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4473 			BIT(INNER_DST_PORT);
4474 
4475 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4476 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4477 			*unused |= BIT(INNER_SRC_IP);
4478 
4479 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4480 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4481 			*unused |= BIT(INNER_DST_IP);
4482 
4483 		if (!usr_ip6_spec->l4_proto)
4484 			*unused |= BIT(INNER_IP_PROTO);
4485 
4486 		if (usr_ip6_spec->tclass)
4487 			return -EOPNOTSUPP;
4488 
4489 		if (usr_ip6_spec->l4_4_bytes)
4490 			return -EOPNOTSUPP;
4491 
4492 		break;
4493 	case ETHER_FLOW:
4494 		ether_spec = &fs->h_u.ether_spec;
4495 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4496 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4497 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4498 
4499 		if (is_zero_ether_addr(ether_spec->h_source))
4500 			*unused |= BIT(INNER_SRC_MAC);
4501 
4502 		if (is_zero_ether_addr(ether_spec->h_dest))
4503 			*unused |= BIT(INNER_DST_MAC);
4504 
4505 		if (!ether_spec->h_proto)
4506 			*unused |= BIT(INNER_ETH_TYPE);
4507 
4508 		break;
4509 	default:
4510 		return -EOPNOTSUPP;
4511 	}
4512 
4513 	if ((fs->flow_type & FLOW_EXT)) {
4514 		if (fs->h_ext.vlan_etype)
4515 			return -EOPNOTSUPP;
4516 		if (!fs->h_ext.vlan_tci)
4517 			*unused |= BIT(INNER_VLAN_TAG_FST);
4518 
4519 		if (fs->m_ext.vlan_tci) {
4520 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4521 				return -EINVAL;
4522 		}
4523 	} else {
4524 		*unused |= BIT(INNER_VLAN_TAG_FST);
4525 	}
4526 
4527 	if (fs->flow_type & FLOW_MAC_EXT) {
4528 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4529 			return -EOPNOTSUPP;
4530 
4531 		if (is_zero_ether_addr(fs->h_ext.h_dest))
4532 			*unused |= BIT(INNER_DST_MAC);
4533 		else
4534 			*unused &= ~(BIT(INNER_DST_MAC));
4535 	}
4536 
4537 	return 0;
4538 }
4539 
4540 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4541 {
4542 	struct hclge_fd_rule *rule = NULL;
4543 	struct hlist_node *node2;
4544 
4545 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4546 		if (rule->location >= location)
4547 			break;
4548 	}
4549 
	return rule && rule->location == location;
4551 }
4552 
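/* The rule list is kept sorted by location. For both add and delete, an
 * existing rule at 'location' is removed first; for add, the new rule is
 * then linked behind its predecessor (or at the list head).
 */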
4553 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4554 				     struct hclge_fd_rule *new_rule,
4555 				     u16 location,
4556 				     bool is_add)
4557 {
4558 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
4559 	struct hlist_node *node2;
4560 
4561 	if (is_add && !new_rule)
4562 		return -EINVAL;
4563 
4564 	hlist_for_each_entry_safe(rule, node2,
4565 				  &hdev->fd_rule_list, rule_node) {
4566 		if (rule->location >= location)
4567 			break;
4568 		parent = rule;
4569 	}
4570 
4571 	if (rule && rule->location == location) {
4572 		hlist_del(&rule->rule_node);
4573 		kfree(rule);
4574 		hdev->hclge_fd_rule_num--;
4575 
4576 		if (!is_add)
4577 			return 0;
4578 
4579 	} else if (!is_add) {
4580 		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d does not exist\n",
4582 			location);
4583 		return -EINVAL;
4584 	}
4585 
4586 	INIT_HLIST_NODE(&new_rule->rule_node);
4587 
4588 	if (parent)
4589 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4590 	else
4591 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4592 
4593 	hdev->hclge_fd_rule_num++;
4594 
4595 	return 0;
4596 }
4597 
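/* Translate an ethtool flow spec into the driver's tuple representation.
 * IPv4 flows use only the last element of the 4-word IP arrays, while IPv6
 * addresses fill all four words (converted to CPU byte order).
 */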
4598 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4599 			      struct ethtool_rx_flow_spec *fs,
4600 			      struct hclge_fd_rule *rule)
4601 {
4602 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4603 
4604 	switch (flow_type) {
4605 	case SCTP_V4_FLOW:
4606 	case TCP_V4_FLOW:
4607 	case UDP_V4_FLOW:
4608 		rule->tuples.src_ip[3] =
4609 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4610 		rule->tuples_mask.src_ip[3] =
4611 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4612 
4613 		rule->tuples.dst_ip[3] =
4614 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4615 		rule->tuples_mask.dst_ip[3] =
4616 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4617 
4618 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4619 		rule->tuples_mask.src_port =
4620 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4621 
4622 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4623 		rule->tuples_mask.dst_port =
4624 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4625 
4626 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4627 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4628 
4629 		rule->tuples.ether_proto = ETH_P_IP;
4630 		rule->tuples_mask.ether_proto = 0xFFFF;
4631 
4632 		break;
4633 	case IP_USER_FLOW:
4634 		rule->tuples.src_ip[3] =
4635 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4636 		rule->tuples_mask.src_ip[3] =
4637 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4638 
4639 		rule->tuples.dst_ip[3] =
4640 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4641 		rule->tuples_mask.dst_ip[3] =
4642 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4643 
4644 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4645 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4646 
4647 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4648 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4649 
4650 		rule->tuples.ether_proto = ETH_P_IP;
4651 		rule->tuples_mask.ether_proto = 0xFFFF;
4652 
4653 		break;
4654 	case SCTP_V6_FLOW:
4655 	case TCP_V6_FLOW:
4656 	case UDP_V6_FLOW:
4657 		be32_to_cpu_array(rule->tuples.src_ip,
4658 				  fs->h_u.tcp_ip6_spec.ip6src, 4);
4659 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4660 				  fs->m_u.tcp_ip6_spec.ip6src, 4);
4661 
4662 		be32_to_cpu_array(rule->tuples.dst_ip,
4663 				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
4664 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4665 				  fs->m_u.tcp_ip6_spec.ip6dst, 4);
4666 
4667 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4668 		rule->tuples_mask.src_port =
4669 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4670 
4671 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4672 		rule->tuples_mask.dst_port =
4673 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4674 
4675 		rule->tuples.ether_proto = ETH_P_IPV6;
4676 		rule->tuples_mask.ether_proto = 0xFFFF;
4677 
4678 		break;
4679 	case IPV6_USER_FLOW:
4680 		be32_to_cpu_array(rule->tuples.src_ip,
4681 				  fs->h_u.usr_ip6_spec.ip6src, 4);
4682 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4683 				  fs->m_u.usr_ip6_spec.ip6src, 4);
4684 
4685 		be32_to_cpu_array(rule->tuples.dst_ip,
4686 				  fs->h_u.usr_ip6_spec.ip6dst, 4);
4687 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4688 				  fs->m_u.usr_ip6_spec.ip6dst, 4);
4689 
4690 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4691 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4692 
4693 		rule->tuples.ether_proto = ETH_P_IPV6;
4694 		rule->tuples_mask.ether_proto = 0xFFFF;
4695 
4696 		break;
4697 	case ETHER_FLOW:
4698 		ether_addr_copy(rule->tuples.src_mac,
4699 				fs->h_u.ether_spec.h_source);
4700 		ether_addr_copy(rule->tuples_mask.src_mac,
4701 				fs->m_u.ether_spec.h_source);
4702 
4703 		ether_addr_copy(rule->tuples.dst_mac,
4704 				fs->h_u.ether_spec.h_dest);
4705 		ether_addr_copy(rule->tuples_mask.dst_mac,
4706 				fs->m_u.ether_spec.h_dest);
4707 
4708 		rule->tuples.ether_proto =
4709 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
4710 		rule->tuples_mask.ether_proto =
4711 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
4712 
4713 		break;
4714 	default:
4715 		return -EOPNOTSUPP;
4716 	}
4717 
4718 	switch (flow_type) {
4719 	case SCTP_V4_FLOW:
4720 	case SCTP_V6_FLOW:
4721 		rule->tuples.ip_proto = IPPROTO_SCTP;
4722 		rule->tuples_mask.ip_proto = 0xFF;
4723 		break;
4724 	case TCP_V4_FLOW:
4725 	case TCP_V6_FLOW:
4726 		rule->tuples.ip_proto = IPPROTO_TCP;
4727 		rule->tuples_mask.ip_proto = 0xFF;
4728 		break;
4729 	case UDP_V4_FLOW:
4730 	case UDP_V6_FLOW:
4731 		rule->tuples.ip_proto = IPPROTO_UDP;
4732 		rule->tuples_mask.ip_proto = 0xFF;
4733 		break;
4734 	default:
4735 		break;
4736 	}
4737 
4738 	if ((fs->flow_type & FLOW_EXT)) {
4739 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4740 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4741 	}
4742 
4743 	if (fs->flow_type & FLOW_MAC_EXT) {
4744 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4745 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4746 	}
4747 
4748 	return 0;
4749 }
4750 
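/* Add a flow director rule from ethtool: validate the spec, resolve the
 * target vport/queue from the ring cookie, program the stage-1 action and
 * TCAM key, and finally track the rule in the sorted rule list.
 */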
4751 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4752 			      struct ethtool_rxnfc *cmd)
4753 {
4754 	struct hclge_vport *vport = hclge_get_vport(handle);
4755 	struct hclge_dev *hdev = vport->back;
4756 	u16 dst_vport_id = 0, q_index = 0;
4757 	struct ethtool_rx_flow_spec *fs;
4758 	struct hclge_fd_rule *rule;
4759 	u32 unused = 0;
4760 	u8 action;
4761 	int ret;
4762 
4763 	if (!hnae3_dev_fd_supported(hdev))
4764 		return -EOPNOTSUPP;
4765 
4766 	if (!hdev->fd_en) {
4767 		dev_warn(&hdev->pdev->dev,
4768 			 "Please enable flow director first\n");
4769 		return -EOPNOTSUPP;
4770 	}
4771 
4772 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4773 
4774 	ret = hclge_fd_check_spec(hdev, fs, &unused);
4775 	if (ret) {
4776 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4777 		return ret;
4778 	}
4779 
4780 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4781 		action = HCLGE_FD_ACTION_DROP_PACKET;
4782 	} else {
4783 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4784 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4785 		u16 tqps;
4786 
4787 		if (vf > hdev->num_req_vfs) {
4788 			dev_err(&hdev->pdev->dev,
4789 				"Error: vf id (%d) > max vf num (%d)\n",
4790 				vf, hdev->num_req_vfs);
4791 			return -EINVAL;
4792 		}
4793 
4794 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4795 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4796 
4797 		if (ring >= tqps) {
4798 			dev_err(&hdev->pdev->dev,
4799 				"Error: queue id (%d) > max tqp num (%d)\n",
4800 				ring, tqps - 1);
4801 			return -EINVAL;
4802 		}
4803 
4804 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4805 		q_index = ring;
4806 	}
4807 
4808 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4809 	if (!rule)
4810 		return -ENOMEM;
4811 
4812 	ret = hclge_fd_get_tuple(hdev, fs, rule);
4813 	if (ret)
4814 		goto free_rule;
4815 
4816 	rule->flow_type = fs->flow_type;
4817 
4818 	rule->location = fs->location;
4819 	rule->unused_tuple = unused;
4820 	rule->vf_id = dst_vport_id;
4821 	rule->queue_id = q_index;
4822 	rule->action = action;
4823 
4824 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4825 	if (ret)
4826 		goto free_rule;
4827 
4828 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4829 	if (ret)
4830 		goto free_rule;
4831 
4832 	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4833 	if (ret)
4834 		goto free_rule;
4835 
4836 	return ret;
4837 
4838 free_rule:
4839 	kfree(rule);
4840 	return ret;
4841 }
4842 
4843 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4844 			      struct ethtool_rxnfc *cmd)
4845 {
4846 	struct hclge_vport *vport = hclge_get_vport(handle);
4847 	struct hclge_dev *hdev = vport->back;
4848 	struct ethtool_rx_flow_spec *fs;
4849 	int ret;
4850 
4851 	if (!hnae3_dev_fd_supported(hdev))
4852 		return -EOPNOTSUPP;
4853 
4854 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4855 
4856 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4857 		return -EINVAL;
4858 
4859 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
4860 		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %d does not exist\n",
4862 			fs->location);
4863 		return -ENOENT;
4864 	}
4865 
4866 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4867 				   fs->location, NULL, false);
4868 	if (ret)
4869 		return ret;
4870 
4871 	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4872 					 false);
4873 }
4874 
4875 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4876 				     bool clear_list)
4877 {
4878 	struct hclge_vport *vport = hclge_get_vport(handle);
4879 	struct hclge_dev *hdev = vport->back;
4880 	struct hclge_fd_rule *rule;
4881 	struct hlist_node *node;
4882 
4883 	if (!hnae3_dev_fd_supported(hdev))
4884 		return;
4885 
4886 	if (clear_list) {
4887 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4888 					  rule_node) {
4889 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4890 					     rule->location, NULL, false);
4891 			hlist_del(&rule->rule_node);
4892 			kfree(rule);
4893 			hdev->hclge_fd_rule_num--;
4894 		}
4895 	} else {
4896 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4897 					  rule_node)
4898 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4899 					     rule->location, NULL, false);
4900 	}
4901 }
4902 
4903 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4904 {
4905 	struct hclge_vport *vport = hclge_get_vport(handle);
4906 	struct hclge_dev *hdev = vport->back;
4907 	struct hclge_fd_rule *rule;
4908 	struct hlist_node *node;
4909 	int ret;
4910 
	/* Return ok here, because the reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
4915 	if (!hnae3_dev_fd_supported(hdev))
4916 		return 0;
4917 
	/* if fd is disabled, it should not be restored during reset */
4919 	if (!hdev->fd_en)
4920 		return 0;
4921 
4922 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4923 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4924 		if (!ret)
4925 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4926 
4927 		if (ret) {
4928 			dev_warn(&hdev->pdev->dev,
4929 				 "Restore rule %d failed, remove it\n",
4930 				 rule->location);
4931 			hlist_del(&rule->rule_node);
4932 			kfree(rule);
4933 			hdev->hclge_fd_rule_num--;
4934 		}
4935 	}
4936 	return 0;
4937 }
4938 
4939 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4940 				 struct ethtool_rxnfc *cmd)
4941 {
4942 	struct hclge_vport *vport = hclge_get_vport(handle);
4943 	struct hclge_dev *hdev = vport->back;
4944 
4945 	if (!hnae3_dev_fd_supported(hdev))
4946 		return -EOPNOTSUPP;
4947 
4948 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
4949 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4950 
4951 	return 0;
4952 }
4953 
4954 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4955 				  struct ethtool_rxnfc *cmd)
4956 {
4957 	struct hclge_vport *vport = hclge_get_vport(handle);
4958 	struct hclge_fd_rule *rule = NULL;
4959 	struct hclge_dev *hdev = vport->back;
4960 	struct ethtool_rx_flow_spec *fs;
4961 	struct hlist_node *node2;
4962 
4963 	if (!hnae3_dev_fd_supported(hdev))
4964 		return -EOPNOTSUPP;
4965 
4966 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4967 
4968 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4969 		if (rule->location >= fs->location)
4970 			break;
4971 	}
4972 
4973 	if (!rule || fs->location != rule->location)
4974 		return -ENOENT;
4975 
4976 	fs->flow_type = rule->flow_type;
4977 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4978 	case SCTP_V4_FLOW:
4979 	case TCP_V4_FLOW:
4980 	case UDP_V4_FLOW:
4981 		fs->h_u.tcp_ip4_spec.ip4src =
4982 				cpu_to_be32(rule->tuples.src_ip[3]);
4983 		fs->m_u.tcp_ip4_spec.ip4src =
4984 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
4985 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4986 
4987 		fs->h_u.tcp_ip4_spec.ip4dst =
4988 				cpu_to_be32(rule->tuples.dst_ip[3]);
4989 		fs->m_u.tcp_ip4_spec.ip4dst =
4990 				rule->unused_tuple & BIT(INNER_DST_IP) ?
4991 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4992 
4993 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4994 		fs->m_u.tcp_ip4_spec.psrc =
4995 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4996 				0 : cpu_to_be16(rule->tuples_mask.src_port);
4997 
4998 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4999 		fs->m_u.tcp_ip4_spec.pdst =
5000 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5001 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5002 
5003 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5004 		fs->m_u.tcp_ip4_spec.tos =
5005 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5006 				0 : rule->tuples_mask.ip_tos;
5007 
5008 		break;
5009 	case IP_USER_FLOW:
5010 		fs->h_u.usr_ip4_spec.ip4src =
5011 				cpu_to_be32(rule->tuples.src_ip[3]);
		fs->m_u.usr_ip4_spec.ip4src =
5013 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
5014 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5015 
5016 		fs->h_u.usr_ip4_spec.ip4dst =
5017 				cpu_to_be32(rule->tuples.dst_ip[3]);
5018 		fs->m_u.usr_ip4_spec.ip4dst =
5019 				rule->unused_tuple & BIT(INNER_DST_IP) ?
5020 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5021 
5022 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5023 		fs->m_u.usr_ip4_spec.tos =
5024 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5025 				0 : rule->tuples_mask.ip_tos;
5026 
5027 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5028 		fs->m_u.usr_ip4_spec.proto =
5029 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5030 				0 : rule->tuples_mask.ip_proto;
5031 
5032 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5033 
5034 		break;
5035 	case SCTP_V6_FLOW:
5036 	case TCP_V6_FLOW:
5037 	case UDP_V6_FLOW:
5038 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5039 				  rule->tuples.src_ip, 4);
5040 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5041 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5042 		else
5043 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5044 					  rule->tuples_mask.src_ip, 4);
5045 
5046 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5047 				  rule->tuples.dst_ip, 4);
5048 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5049 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5050 		else
5051 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5052 					  rule->tuples_mask.dst_ip, 4);
5053 
5054 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5055 		fs->m_u.tcp_ip6_spec.psrc =
5056 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5057 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5058 
5059 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5060 		fs->m_u.tcp_ip6_spec.pdst =
5061 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5062 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5063 
5064 		break;
5065 	case IPV6_USER_FLOW:
5066 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5067 				  rule->tuples.src_ip, 4);
5068 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5069 			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5070 		else
5071 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5072 					  rule->tuples_mask.src_ip, 4);
5073 
5074 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5075 				  rule->tuples.dst_ip, 4);
5076 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5077 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5078 		else
5079 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5080 					  rule->tuples_mask.dst_ip, 4);
5081 
5082 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5083 		fs->m_u.usr_ip6_spec.l4_proto =
5084 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5085 				0 : rule->tuples_mask.ip_proto;
5086 
5087 		break;
5088 	case ETHER_FLOW:
5089 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5090 				rule->tuples.src_mac);
5091 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5092 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5093 		else
5094 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5095 					rule->tuples_mask.src_mac);
5096 
5097 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5098 				rule->tuples.dst_mac);
5099 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5100 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5101 		else
5102 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5103 					rule->tuples_mask.dst_mac);
5104 
5105 		fs->h_u.ether_spec.h_proto =
5106 				cpu_to_be16(rule->tuples.ether_proto);
5107 		fs->m_u.ether_spec.h_proto =
5108 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5109 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5110 
5111 		break;
5112 	default:
5113 		return -EOPNOTSUPP;
5114 	}
5115 
5116 	if (fs->flow_type & FLOW_EXT) {
5117 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5118 		fs->m_ext.vlan_tci =
5119 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5120 				cpu_to_be16(VLAN_VID_MASK) :
5121 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5122 	}
5123 
5124 	if (fs->flow_type & FLOW_MAC_EXT) {
5125 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5126 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
5131 	}
5132 
5133 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5134 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5135 	} else {
5136 		u64 vf_id;
5137 
5138 		fs->ring_cookie = rule->queue_id;
5139 		vf_id = rule->vf_id;
5140 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5141 		fs->ring_cookie |= vf_id;
5142 	}
5143 
5144 	return 0;
5145 }
5146 
5147 static int hclge_get_all_rules(struct hnae3_handle *handle,
5148 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5149 {
5150 	struct hclge_vport *vport = hclge_get_vport(handle);
5151 	struct hclge_dev *hdev = vport->back;
5152 	struct hclge_fd_rule *rule;
5153 	struct hlist_node *node2;
5154 	int cnt = 0;
5155 
5156 	if (!hnae3_dev_fd_supported(hdev))
5157 		return -EOPNOTSUPP;
5158 
5159 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5160 
5161 	hlist_for_each_entry_safe(rule, node2,
5162 				  &hdev->fd_rule_list, rule_node) {
5163 		if (cnt == cmd->rule_cnt)
5164 			return -EMSGSIZE;
5165 
5166 		rule_locs[cnt] = rule->location;
5167 		cnt++;
5168 	}
5169 
5170 	cmd->rule_cnt = cnt;
5171 
5172 	return 0;
5173 }
5174 
5175 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5176 {
5177 	struct hclge_vport *vport = hclge_get_vport(handle);
5178 	struct hclge_dev *hdev = vport->back;
5179 
5180 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5181 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5182 }
5183 
5184 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5185 {
5186 	struct hclge_vport *vport = hclge_get_vport(handle);
5187 	struct hclge_dev *hdev = vport->back;
5188 
5189 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5190 }
5191 
5192 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5193 {
5194 	struct hclge_vport *vport = hclge_get_vport(handle);
5195 	struct hclge_dev *hdev = vport->back;
5196 
5197 	return hdev->reset_count;
5198 }
5199 
5200 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5201 {
5202 	struct hclge_vport *vport = hclge_get_vport(handle);
5203 	struct hclge_dev *hdev = vport->back;
5204 
5205 	hdev->fd_en = enable;
5206 	if (!enable)
5207 		hclge_del_all_fd_entries(handle, false);
5208 	else
5209 		hclge_restore_fd_entries(handle);
5210 }
5211 
5212 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5213 {
5214 	struct hclge_desc desc;
5215 	struct hclge_config_mac_mode_cmd *req =
5216 		(struct hclge_config_mac_mode_cmd *)desc.data;
5217 	u32 loop_en = 0;
5218 	int ret;
5219 
5220 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5221 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5222 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5223 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5224 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5225 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5226 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5227 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5228 	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5229 	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5230 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5231 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5232 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5233 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5234 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5235 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5236 
5237 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5238 	if (ret)
5239 		dev_err(&hdev->pdev->dev,
5240 			"mac enable fail, ret =%d.\n", ret);
5241 }
5242 
5243 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5244 {
5245 	struct hclge_config_mac_mode_cmd *req;
5246 	struct hclge_desc desc;
5247 	u32 loop_en;
5248 	int ret;
5249 
5250 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config first */
5252 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5253 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5254 	if (ret) {
5255 		dev_err(&hdev->pdev->dev,
5256 			"mac loopback get fail, ret =%d.\n", ret);
5257 		return ret;
5258 	}
5259 
5260 	/* 2 Then setup the loopback flag */
5261 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5262 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5263 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5264 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5265 
5266 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5267 
	/* 3 Config mac work mode with the loopback flag
	 * and its original configuration parameters
	 */
5271 	hclge_cmd_reuse_desc(&desc, false);
5272 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5273 	if (ret)
5274 		dev_err(&hdev->pdev->dev,
5275 			"mac loopback set fail, ret =%d.\n", ret);
5276 	return ret;
5277 }
5278 
5279 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5280 				     enum hnae3_loop loop_mode)
5281 {
5282 #define HCLGE_SERDES_RETRY_MS	10
5283 #define HCLGE_SERDES_RETRY_NUM	100
5284 
5285 #define HCLGE_MAC_LINK_STATUS_MS   20
5286 #define HCLGE_MAC_LINK_STATUS_NUM  10
5287 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5288 #define HCLGE_MAC_LINK_STATUS_UP   1
5289 
5290 	struct hclge_serdes_lb_cmd *req;
5291 	struct hclge_desc desc;
5292 	int mac_link_ret = 0;
5293 	int ret, i = 0;
5294 	u8 loop_mode_b;
5295 
5296 	req = (struct hclge_serdes_lb_cmd *)desc.data;
5297 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5298 
5299 	switch (loop_mode) {
5300 	case HNAE3_LOOP_SERIAL_SERDES:
5301 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5302 		break;
5303 	case HNAE3_LOOP_PARALLEL_SERDES:
5304 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5305 		break;
5306 	default:
5307 		dev_err(&hdev->pdev->dev,
5308 			"unsupported serdes loopback mode %d\n", loop_mode);
5309 		return -ENOTSUPP;
5310 	}
5311 
5312 	if (en) {
5313 		req->enable = loop_mode_b;
5314 		req->mask = loop_mode_b;
5315 		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5316 	} else {
5317 		req->mask = loop_mode_b;
5318 		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5319 	}
5320 
5321 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5322 	if (ret) {
5323 		dev_err(&hdev->pdev->dev,
5324 			"serdes loopback set fail, ret = %d\n", ret);
5325 		return ret;
5326 	}
5327 
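	/* Poll the command result until the firmware reports completion or
	 * the retry budget is exhausted, then check whether the serdes
	 * loopback change actually succeeded.
	 */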
5328 	do {
5329 		msleep(HCLGE_SERDES_RETRY_MS);
5330 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5331 					   true);
5332 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5333 		if (ret) {
5334 			dev_err(&hdev->pdev->dev,
5335 				"serdes loopback get, ret = %d\n", ret);
5336 			return ret;
5337 		}
5338 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
5339 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5340 
5341 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5342 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5343 		return -EBUSY;
5344 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5345 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5346 		return -EIO;
5347 	}
5348 
5349 	hclge_cfg_mac_mode(hdev, en);
5350 
5351 	i = 0;
5352 	do {
		/* serdes internal loopback, independent of the network cable */
5354 		msleep(HCLGE_MAC_LINK_STATUS_MS);
5355 		ret = hclge_get_mac_link_status(hdev);
5356 		if (ret == mac_link_ret)
5357 			return 0;
5358 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5359 
5360 	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5361 
5362 	return -EBUSY;
5363 }
5364 
5365 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5366 			    int stream_id, bool enable)
5367 {
5368 	struct hclge_desc desc;
5369 	struct hclge_cfg_com_tqp_queue_cmd *req =
5370 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5371 	int ret;
5372 
5373 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5374 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5375 	req->stream_id = cpu_to_le16(stream_id);
5376 	req->enable |= enable << HCLGE_TQP_ENABLE_B;
5377 
5378 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5379 	if (ret)
5380 		dev_err(&hdev->pdev->dev,
5381 			"Tqp enable fail, status =%d.\n", ret);
5382 	return ret;
5383 }
5384 
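/* Dispatch a loopback request to the app (MAC) or serdes handler according
 * to loop_mode, then enable or disable all TQPs of this vport to match.
 */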
5385 static int hclge_set_loopback(struct hnae3_handle *handle,
5386 			      enum hnae3_loop loop_mode, bool en)
5387 {
5388 	struct hclge_vport *vport = hclge_get_vport(handle);
5389 	struct hnae3_knic_private_info *kinfo;
5390 	struct hclge_dev *hdev = vport->back;
5391 	int i, ret;
5392 
5393 	switch (loop_mode) {
5394 	case HNAE3_LOOP_APP:
5395 		ret = hclge_set_app_loopback(hdev, en);
5396 		break;
5397 	case HNAE3_LOOP_SERIAL_SERDES:
5398 	case HNAE3_LOOP_PARALLEL_SERDES:
5399 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5400 		break;
5401 	default:
5402 		ret = -ENOTSUPP;
5403 		dev_err(&hdev->pdev->dev,
5404 			"loop_mode %d is not supported\n", loop_mode);
5405 		break;
5406 	}
5407 
5408 	if (ret)
5409 		return ret;
5410 
5411 	kinfo = &vport->nic.kinfo;
5412 	for (i = 0; i < kinfo->num_tqps; i++) {
5413 		ret = hclge_tqp_enable(hdev, i, 0, en);
5414 		if (ret)
5415 			return ret;
5416 	}
5417 
5418 	return 0;
5419 }
5420 
5421 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5422 {
5423 	struct hclge_vport *vport = hclge_get_vport(handle);
5424 	struct hnae3_knic_private_info *kinfo;
5425 	struct hnae3_queue *queue;
5426 	struct hclge_tqp *tqp;
5427 	int i;
5428 
5429 	kinfo = &vport->nic.kinfo;
5430 	for (i = 0; i < kinfo->num_tqps; i++) {
5431 		queue = handle->kinfo.tqp[i];
5432 		tqp = container_of(queue, struct hclge_tqp, q);
5433 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5434 	}
5435 }
5436 
5437 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5438 {
5439 	struct hclge_vport *vport = hclge_get_vport(handle);
5440 	struct hclge_dev *hdev = vport->back;
5441 
5442 	if (enable) {
5443 		mod_timer(&hdev->service_timer, jiffies + HZ);
5444 	} else {
5445 		del_timer_sync(&hdev->service_timer);
5446 		cancel_work_sync(&hdev->service_task);
5447 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5448 	}
5449 }
5450 
5451 static int hclge_ae_start(struct hnae3_handle *handle)
5452 {
5453 	struct hclge_vport *vport = hclge_get_vport(handle);
5454 	struct hclge_dev *hdev = vport->back;
5455 
5456 	/* mac enable */
5457 	hclge_cfg_mac_mode(hdev, true);
5458 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5459 	hdev->hw.mac.link = 0;
5460 
5461 	/* reset tqp stats */
5462 	hclge_reset_tqp_stats(handle);
5463 
5464 	hclge_mac_start_phy(hdev);
5465 
5466 	return 0;
5467 }
5468 
5469 static void hclge_ae_stop(struct hnae3_handle *handle)
5470 {
5471 	struct hclge_vport *vport = hclge_get_vport(handle);
5472 	struct hclge_dev *hdev = vport->back;
5473 	int i;
5474 
5475 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
5476 
5477 	/* If it is not a PF reset, the firmware will disable the MAC,
5478 	 * so we only need to stop the PHY here.
5479 	 */
5480 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5481 	    hdev->reset_type != HNAE3_FUNC_RESET) {
5482 		hclge_mac_stop_phy(hdev);
5483 		return;
5484 	}
5485 
5486 	for (i = 0; i < handle->kinfo.num_tqps; i++)
5487 		hclge_reset_tqp(handle, i);
5488 
5489 	/* Mac disable */
5490 	hclge_cfg_mac_mode(hdev, false);
5491 
5492 	hclge_mac_stop_phy(hdev);
5493 
5494 	/* reset tqp stats */
5495 	hclge_reset_tqp_stats(handle);
5496 	hclge_update_link_status(hdev);
5497 }
5498 
5499 int hclge_vport_start(struct hclge_vport *vport)
5500 {
5501 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5502 	vport->last_active_jiffies = jiffies;
5503 	return 0;
5504 }
5505 
5506 void hclge_vport_stop(struct hclge_vport *vport)
5507 {
5508 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5509 }
5510 
5511 static int hclge_client_start(struct hnae3_handle *handle)
5512 {
5513 	struct hclge_vport *vport = hclge_get_vport(handle);
5514 
5515 	return hclge_vport_start(vport);
5516 }
5517 
5518 static void hclge_client_stop(struct hnae3_handle *handle)
5519 {
5520 	struct hclge_vport *vport = hclge_get_vport(handle);
5521 
5522 	hclge_vport_stop(vport);
5523 }
5524 
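/* Translate the command queue response and the per-opcode response code of a
 * MAC-VLAN table operation (add/remove/lookup) into a standard error code.
 */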
5525 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5526 					 u16 cmdq_resp, u8  resp_code,
5527 					 enum hclge_mac_vlan_tbl_opcode op)
5528 {
5529 	struct hclge_dev *hdev = vport->back;
5530 	int return_status = -EIO;
5531 
5532 	if (cmdq_resp) {
5533 		dev_err(&hdev->pdev->dev,
5534 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5535 			cmdq_resp);
5536 		return -EIO;
5537 	}
5538 
5539 	if (op == HCLGE_MAC_VLAN_ADD) {
5540 		if ((!resp_code) || (resp_code == 1)) {
5541 			return_status = 0;
5542 		} else if (resp_code == 2) {
5543 			return_status = -ENOSPC;
5544 			dev_err(&hdev->pdev->dev,
5545 				"add mac addr failed for uc_overflow.\n");
5546 		} else if (resp_code == 3) {
5547 			return_status = -ENOSPC;
5548 			dev_err(&hdev->pdev->dev,
5549 				"add mac addr failed for mc_overflow.\n");
5550 		} else {
5551 			dev_err(&hdev->pdev->dev,
5552 				"add mac addr failed for undefined, code=%d.\n",
5553 				resp_code);
5554 		}
5555 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
5556 		if (!resp_code) {
5557 			return_status = 0;
5558 		} else if (resp_code == 1) {
5559 			return_status = -ENOENT;
5560 			dev_dbg(&hdev->pdev->dev,
5561 				"remove mac addr failed for miss.\n");
5562 		} else {
5563 			dev_err(&hdev->pdev->dev,
5564 				"remove mac addr failed for undefined, code=%d.\n",
5565 				resp_code);
5566 		}
5567 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
5568 		if (!resp_code) {
5569 			return_status = 0;
5570 		} else if (resp_code == 1) {
5571 			return_status = -ENOENT;
5572 			dev_dbg(&hdev->pdev->dev,
5573 				"lookup mac addr failed for miss.\n");
5574 		} else {
5575 			dev_err(&hdev->pdev->dev,
5576 				"lookup mac addr failed for undefined, code=%d.\n",
5577 				resp_code);
5578 		}
5579 	} else {
5580 		return_status = -EINVAL;
5581 		dev_err(&hdev->pdev->dev,
5582 			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5583 			op);
5584 	}
5585 
5586 	return return_status;
5587 }
5588 
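/* Set or clear the bit for @vfid in the function bitmap of a MAC-VLAN table
 * entry. VFIDs 0-191 are kept in desc[1] and VFIDs 192-255 in desc[2],
 * 32 function ids per 32-bit word.
 */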
5589 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5590 {
5591 	int word_num;
5592 	int bit_num;
5593 
5594 	if (vfid > 255 || vfid < 0)
5595 		return -EIO;
5596 
5597 	if (vfid >= 0 && vfid <= 191) {
5598 		word_num = vfid / 32;
5599 		bit_num  = vfid % 32;
5600 		if (clr)
5601 			desc[1].data[word_num] &= cpu_to_le32(~(1U << bit_num));
5602 		else
5603 			desc[1].data[word_num] |= cpu_to_le32(1U << bit_num);
5604 	} else {
5605 		word_num = (vfid - 192) / 32;
5606 		bit_num  = vfid % 32;
5607 		if (clr)
5608 			desc[2].data[word_num] &= cpu_to_le32(~(1U << bit_num));
5609 		else
5610 			desc[2].data[word_num] |= cpu_to_le32(1U << bit_num);
5611 	}
5612 
5613 	return 0;
5614 }
5615 
5616 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5617 {
5618 #define HCLGE_DESC_NUMBER 3
5619 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5620 	int i, j;
5621 
5622 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5623 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5624 			if (desc[i].data[j])
5625 				return false;
5626 
5627 	return true;
5628 }
5629 
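/* Pack a MAC address into a MAC-VLAN table entry: bytes 0-3 form the 32-bit
 * high word and bytes 4-5 the 16-bit low word. For multicast addresses the
 * entry type and mc enable bits are set as well.
 */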
5630 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5631 				   const u8 *addr, bool is_mc)
5632 {
5633 	const unsigned char *mac_addr = addr;
5634 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5635 		       (mac_addr[0]) | (mac_addr[1] << 8);
5636 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5637 
5638 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5639 	if (is_mc) {
5640 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5641 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5642 	}
5643 
5644 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5645 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5646 }
5647 
5648 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5649 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
5650 {
5651 	struct hclge_dev *hdev = vport->back;
5652 	struct hclge_desc desc;
5653 	u8 resp_code;
5654 	u16 retval;
5655 	int ret;
5656 
5657 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5658 
5659 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5660 
5661 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5662 	if (ret) {
5663 		dev_err(&hdev->pdev->dev,
5664 			"del mac addr failed for cmd_send, ret =%d.\n",
5665 			ret);
5666 		return ret;
5667 	}
5668 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5669 	retval = le16_to_cpu(desc.retval);
5670 
5671 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5672 					     HCLGE_MAC_VLAN_REMOVE);
5673 }
5674 
5675 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5676 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
5677 				     struct hclge_desc *desc,
5678 				     bool is_mc)
5679 {
5680 	struct hclge_dev *hdev = vport->back;
5681 	u8 resp_code;
5682 	u16 retval;
5683 	int ret;
5684 
5685 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5686 	if (is_mc) {
5687 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5688 		memcpy(desc[0].data,
5689 		       req,
5690 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5691 		hclge_cmd_setup_basic_desc(&desc[1],
5692 					   HCLGE_OPC_MAC_VLAN_ADD,
5693 					   true);
5694 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5695 		hclge_cmd_setup_basic_desc(&desc[2],
5696 					   HCLGE_OPC_MAC_VLAN_ADD,
5697 					   true);
5698 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
5699 	} else {
5700 		memcpy(desc[0].data,
5701 		       req,
5702 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5703 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
5704 	}
5705 	if (ret) {
5706 		dev_err(&hdev->pdev->dev,
5707 			"lookup mac addr failed for cmd_send, ret =%d.\n",
5708 			ret);
5709 		return ret;
5710 	}
5711 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5712 	retval = le16_to_cpu(desc[0].retval);
5713 
5714 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5715 					     HCLGE_MAC_VLAN_LKUP);
5716 }
5717 
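/* Add an entry to the MAC-VLAN table. A unicast entry fits in a single
 * descriptor; a multicast entry is written with the three descriptors passed
 * in @mc_desc, which also carry the function bitmap for the entry.
 */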
5718 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5719 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
5720 				  struct hclge_desc *mc_desc)
5721 {
5722 	struct hclge_dev *hdev = vport->back;
5723 	int cfg_status;
5724 	u8 resp_code;
5725 	u16 retval;
5726 	int ret;
5727 
5728 	if (!mc_desc) {
5729 		struct hclge_desc desc;
5730 
5731 		hclge_cmd_setup_basic_desc(&desc,
5732 					   HCLGE_OPC_MAC_VLAN_ADD,
5733 					   false);
5734 		memcpy(desc.data, req,
5735 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5736 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5737 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5738 		retval = le16_to_cpu(desc.retval);
5739 
5740 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5741 							   resp_code,
5742 							   HCLGE_MAC_VLAN_ADD);
5743 	} else {
5744 		hclge_cmd_reuse_desc(&mc_desc[0], false);
5745 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5746 		hclge_cmd_reuse_desc(&mc_desc[1], false);
5747 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5748 		hclge_cmd_reuse_desc(&mc_desc[2], false);
5749 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5750 		memcpy(mc_desc[0].data, req,
5751 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5752 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5753 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5754 		retval = le16_to_cpu(mc_desc[0].retval);
5755 
5756 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5757 							   resp_code,
5758 							   HCLGE_MAC_VLAN_ADD);
5759 	}
5760 
5761 	if (ret) {
5762 		dev_err(&hdev->pdev->dev,
5763 			"add mac addr failed for cmd_send, ret =%d.\n",
5764 			ret);
5765 		return ret;
5766 	}
5767 
5768 	return cfg_status;
5769 }
5770 
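/* Allocate unicast MAC VLAN (UMV) table space from the firmware. The granted
 * space is split into equal private quotas based on (num_req_vfs + 2), with
 * one quota plus the division remainder kept as a shared pool in
 * hdev->share_umv_size.
 */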
5771 static int hclge_init_umv_space(struct hclge_dev *hdev)
5772 {
5773 	u16 allocated_size = 0;
5774 	int ret;
5775 
5776 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5777 				  true);
5778 	if (ret)
5779 		return ret;
5780 
5781 	if (allocated_size < hdev->wanted_umv_size)
5782 		dev_warn(&hdev->pdev->dev,
5783 			 "Alloc umv space failed, want %d, get %d\n",
5784 			 hdev->wanted_umv_size, allocated_size);
5785 
5786 	mutex_init(&hdev->umv_mutex);
5787 	hdev->max_umv_size = allocated_size;
5788 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5789 	hdev->share_umv_size = hdev->priv_umv_size +
5790 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5791 
5792 	return 0;
5793 }
5794 
5795 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5796 {
5797 	int ret;
5798 
5799 	if (hdev->max_umv_size > 0) {
5800 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5801 					  false);
5802 		if (ret)
5803 			return ret;
5804 		hdev->max_umv_size = 0;
5805 	}
5806 	mutex_destroy(&hdev->umv_mutex);
5807 
5808 	return 0;
5809 }
5810 
5811 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5812 			       u16 *allocated_size, bool is_alloc)
5813 {
5814 	struct hclge_umv_spc_alc_cmd *req;
5815 	struct hclge_desc desc;
5816 	int ret;
5817 
5818 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5819 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5820 	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5821 	req->space_size = cpu_to_le32(space_size);
5822 
5823 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5824 	if (ret) {
5825 		dev_err(&hdev->pdev->dev,
5826 			"%s umv space failed for cmd_send, ret =%d\n",
5827 			is_alloc ? "allocate" : "free", ret);
5828 		return ret;
5829 	}
5830 
5831 	if (is_alloc && allocated_size)
5832 		*allocated_size = le32_to_cpu(desc.data[1]);
5833 
5834 	return 0;
5835 }
5836 
5837 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5838 {
5839 	struct hclge_vport *vport;
5840 	int i;
5841 
5842 	for (i = 0; i < hdev->num_alloc_vport; i++) {
5843 		vport = &hdev->vport[i];
5844 		vport->used_umv_num = 0;
5845 	}
5846 
5847 	mutex_lock(&hdev->umv_mutex);
5848 	hdev->share_umv_size = hdev->priv_umv_size +
5849 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5850 	mutex_unlock(&hdev->umv_mutex);
5851 }
5852 
5853 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5854 {
5855 	struct hclge_dev *hdev = vport->back;
5856 	bool is_full;
5857 
5858 	mutex_lock(&hdev->umv_mutex);
5859 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5860 		   hdev->share_umv_size == 0);
5861 	mutex_unlock(&hdev->umv_mutex);
5862 
5863 	return is_full;
5864 }
5865 
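/* Account for one unicast MAC entry being added to or freed from this vport.
 * Entries beyond the vport's private quota are charged against the shared
 * UMV pool; the counters are protected by umv_mutex.
 */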
5866 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5867 {
5868 	struct hclge_dev *hdev = vport->back;
5869 
5870 	mutex_lock(&hdev->umv_mutex);
5871 	if (is_free) {
5872 		if (vport->used_umv_num > hdev->priv_umv_size)
5873 			hdev->share_umv_size++;
5874 
5875 		if (vport->used_umv_num > 0)
5876 			vport->used_umv_num--;
5877 	} else {
5878 		if (vport->used_umv_num >= hdev->priv_umv_size &&
5879 		    hdev->share_umv_size > 0)
5880 			hdev->share_umv_size--;
5881 		vport->used_umv_num++;
5882 	}
5883 	mutex_unlock(&hdev->umv_mutex);
5884 }
5885 
5886 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5887 			     const unsigned char *addr)
5888 {
5889 	struct hclge_vport *vport = hclge_get_vport(handle);
5890 
5891 	return hclge_add_uc_addr_common(vport, addr);
5892 }
5893 
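/* Add a unicast MAC address for a vport. The address is first looked up in
 * the MAC-VLAN table and is only added when no entry exists and the vport
 * still has private or shared UMV space left.
 */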
5894 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5895 			     const unsigned char *addr)
5896 {
5897 	struct hclge_dev *hdev = vport->back;
5898 	struct hclge_mac_vlan_tbl_entry_cmd req;
5899 	struct hclge_desc desc;
5900 	u16 egress_port = 0;
5901 	int ret;
5902 
5903 	/* mac addr check */
5904 	if (is_zero_ether_addr(addr) ||
5905 	    is_broadcast_ether_addr(addr) ||
5906 	    is_multicast_ether_addr(addr)) {
5907 		dev_err(&hdev->pdev->dev,
5908 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5909 			 addr,
5910 			 is_zero_ether_addr(addr),
5911 			 is_broadcast_ether_addr(addr),
5912 			 is_multicast_ether_addr(addr));
5913 		return -EINVAL;
5914 	}
5915 
5916 	memset(&req, 0, sizeof(req));
5917 
5918 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5919 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5920 
5921 	req.egress_port = cpu_to_le16(egress_port);
5922 
5923 	hclge_prepare_mac_addr(&req, addr, false);
5924 
5925 	/* Look up the mac address in the mac_vlan table, and add
5926 	 * it if the entry does not exist. Duplicate unicast entries
5927 	 * are not allowed in the mac_vlan table.
5928 	 */
5929 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5930 	if (ret == -ENOENT) {
5931 		if (!hclge_is_umv_space_full(vport)) {
5932 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5933 			if (!ret)
5934 				hclge_update_umv_space(vport, false);
5935 			return ret;
5936 		}
5937 
5938 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5939 			hdev->priv_umv_size);
5940 
5941 		return -ENOSPC;
5942 	}
5943 
5944 	/* check if we just hit the duplicate */
5945 	if (!ret)
5946 		ret = -EINVAL;
5947 
5948 	dev_err(&hdev->pdev->dev,
5949 		"PF failed to add unicast entry(%pM) in the MAC table\n",
5950 		addr);
5951 
5952 	return ret;
5953 }
5954 
5955 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5956 			    const unsigned char *addr)
5957 {
5958 	struct hclge_vport *vport = hclge_get_vport(handle);
5959 
5960 	return hclge_rm_uc_addr_common(vport, addr);
5961 }
5962 
5963 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5964 			    const unsigned char *addr)
5965 {
5966 	struct hclge_dev *hdev = vport->back;
5967 	struct hclge_mac_vlan_tbl_entry_cmd req;
5968 	int ret;
5969 
5970 	/* mac addr check */
5971 	if (is_zero_ether_addr(addr) ||
5972 	    is_broadcast_ether_addr(addr) ||
5973 	    is_multicast_ether_addr(addr)) {
5974 		dev_dbg(&hdev->pdev->dev,
5975 			"Remove mac err! invalid mac:%pM.\n",
5976 			 addr);
5977 		return -EINVAL;
5978 	}
5979 
5980 	memset(&req, 0, sizeof(req));
5981 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5982 	hclge_prepare_mac_addr(&req, addr, false);
5983 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
5984 	if (!ret)
5985 		hclge_update_umv_space(vport, true);
5986 
5987 	return ret;
5988 }
5989 
5990 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5991 			     const unsigned char *addr)
5992 {
5993 	struct hclge_vport *vport = hclge_get_vport(handle);
5994 
5995 	return hclge_add_mc_addr_common(vport, addr);
5996 }
5997 
5998 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5999 			     const unsigned char *addr)
6000 {
6001 	struct hclge_dev *hdev = vport->back;
6002 	struct hclge_mac_vlan_tbl_entry_cmd req;
6003 	struct hclge_desc desc[3];
6004 	int status;
6005 
6006 	/* mac addr check */
6007 	if (!is_multicast_ether_addr(addr)) {
6008 		dev_err(&hdev->pdev->dev,
6009 			"Add mc mac err! invalid mac:%pM.\n",
6010 			 addr);
6011 		return -EINVAL;
6012 	}
6013 	memset(&req, 0, sizeof(req));
6014 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6015 	hclge_prepare_mac_addr(&req, addr, true);
6016 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6017 	if (!status) {
6018 		/* This mac addr exists, update the VFID for it */
6019 		hclge_update_desc_vfid(desc, vport->vport_id, false);
6020 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6021 	} else {
6022 		/* This mac addr does not exist, add a new entry for it */
6023 		memset(desc[0].data, 0, sizeof(desc[0].data));
6024 		memset(desc[1].data, 0, sizeof(desc[0].data));
6025 		memset(desc[2].data, 0, sizeof(desc[0].data));
6026 		hclge_update_desc_vfid(desc, vport->vport_id, false);
6027 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6028 	}
6029 
6030 	if (status == -ENOSPC)
6031 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6032 
6033 	return status;
6034 }
6035 
6036 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6037 			    const unsigned char *addr)
6038 {
6039 	struct hclge_vport *vport = hclge_get_vport(handle);
6040 
6041 	return hclge_rm_mc_addr_common(vport, addr);
6042 }
6043 
6044 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6045 			    const unsigned char *addr)
6046 {
6047 	struct hclge_dev *hdev = vport->back;
6048 	struct hclge_mac_vlan_tbl_entry_cmd req;
6049 	enum hclge_cmd_status status;
6050 	struct hclge_desc desc[3];
6051 
6052 	/* mac addr check */
6053 	if (!is_multicast_ether_addr(addr)) {
6054 		dev_dbg(&hdev->pdev->dev,
6055 			"Remove mc mac err! invalid mac:%pM.\n",
6056 			 addr);
6057 		return -EINVAL;
6058 	}
6059 
6060 	memset(&req, 0, sizeof(req));
6061 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6062 	hclge_prepare_mac_addr(&req, addr, true);
6063 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6064 	if (!status) {
6065 		/* This mac addr exists, remove this handle's VFID for it */
6066 		hclge_update_desc_vfid(desc, vport->vport_id, true);
6067 
6068 		if (hclge_is_all_function_id_zero(desc))
6069 			/* All the vfids are zero, so delete this entry */
6070 			status = hclge_remove_mac_vlan_tbl(vport, &req);
6071 		else
6072 			/* Not all the vfids are zero, so update the entry */
6073 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6074 
6075 	} else {
6076 		/* This mac address may be in the mta table, but it cannot be
6077 		 * deleted here because an mta entry represents an address
6078 		 * range rather than a specific address. The deletion of all
6079 		 * entries takes effect in update_mta_status, which is called
6080 		 * by hns3_nic_set_rx_mode.
6081 		 */
6082 		status = 0;
6083 	}
6084 
6085 	return status;
6086 }
6087 
6088 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6089 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
6090 {
6091 	struct hclge_vport_mac_addr_cfg *mac_cfg;
6092 	struct list_head *list;
6093 
6094 	if (!vport->vport_id)
6095 		return;
6096 
6097 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6098 	if (!mac_cfg)
6099 		return;
6100 
6101 	mac_cfg->hd_tbl_status = true;
6102 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6103 
6104 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6105 	       &vport->uc_mac_list : &vport->mc_mac_list;
6106 
6107 	list_add_tail(&mac_cfg->node, list);
6108 }
6109 
6110 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6111 			      bool is_write_tbl,
6112 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
6113 {
6114 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6115 	struct list_head *list;
6116 	bool uc_flag, mc_flag;
6117 
6118 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6119 	       &vport->uc_mac_list : &vport->mc_mac_list;
6120 
6121 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6122 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6123 
6124 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6125 		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6126 			if (uc_flag && mac_cfg->hd_tbl_status)
6127 				hclge_rm_uc_addr_common(vport, mac_addr);
6128 
6129 			if (mc_flag && mac_cfg->hd_tbl_status)
6130 				hclge_rm_mc_addr_common(vport, mac_addr);
6131 
6132 			list_del(&mac_cfg->node);
6133 			kfree(mac_cfg);
6134 			break;
6135 		}
6136 	}
6137 }
6138 
6139 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6140 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
6141 {
6142 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6143 	struct list_head *list;
6144 
6145 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6146 	       &vport->uc_mac_list : &vport->mc_mac_list;
6147 
6148 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6149 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6150 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6151 
6152 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6153 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6154 
6155 		mac_cfg->hd_tbl_status = false;
6156 		if (is_del_list) {
6157 			list_del(&mac_cfg->node);
6158 			kfree(mac_cfg);
6159 		}
6160 	}
6161 }
6162 
6163 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6164 {
6165 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
6166 	struct hclge_vport *vport;
6167 	int i;
6168 
6169 	mutex_lock(&hdev->vport_cfg_mutex);
6170 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6171 		vport = &hdev->vport[i];
6172 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6173 			list_del(&mac->node);
6174 			kfree(mac);
6175 		}
6176 
6177 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6178 			list_del(&mac->node);
6179 			kfree(mac);
6180 		}
6181 	}
6182 	mutex_unlock(&hdev->vport_cfg_mutex);
6183 }
6184 
6185 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6186 					      u16 cmdq_resp, u8 resp_code)
6187 {
6188 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
6189 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
6190 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
6191 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
6192 
6193 	int return_status;
6194 
6195 	if (cmdq_resp) {
6196 		dev_err(&hdev->pdev->dev,
6197 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6198 			cmdq_resp);
6199 		return -EIO;
6200 	}
6201 
6202 	switch (resp_code) {
6203 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
6204 	case HCLGE_ETHERTYPE_ALREADY_ADD:
6205 		return_status = 0;
6206 		break;
6207 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6208 		dev_err(&hdev->pdev->dev,
6209 			"add mac ethertype failed for manager table overflow.\n");
6210 		return_status = -EIO;
6211 		break;
6212 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
6213 		dev_err(&hdev->pdev->dev,
6214 			"add mac ethertype failed for key conflict.\n");
6215 		return_status = -EIO;
6216 		break;
6217 	default:
6218 		dev_err(&hdev->pdev->dev,
6219 			"add mac ethertype failed for undefined, code=%d.\n",
6220 			resp_code);
6221 		return_status = -EIO;
6222 	}
6223 
6224 	return return_status;
6225 }
6226 
6227 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6228 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
6229 {
6230 	struct hclge_desc desc;
6231 	u8 resp_code;
6232 	u16 retval;
6233 	int ret;
6234 
6235 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6236 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6237 
6238 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6239 	if (ret) {
6240 		dev_err(&hdev->pdev->dev,
6241 			"add mac ethertype failed for cmd_send, ret =%d.\n",
6242 			ret);
6243 		return ret;
6244 	}
6245 
6246 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6247 	retval = le16_to_cpu(desc.retval);
6248 
6249 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6250 }
6251 
6252 static int init_mgr_tbl(struct hclge_dev *hdev)
6253 {
6254 	int ret;
6255 	int i;
6256 
6257 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6258 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6259 		if (ret) {
6260 			dev_err(&hdev->pdev->dev,
6261 				"add mac ethertype failed, ret =%d.\n",
6262 				ret);
6263 			return ret;
6264 		}
6265 	}
6266 
6267 	return 0;
6268 }
6269 
6270 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6271 {
6272 	struct hclge_vport *vport = hclge_get_vport(handle);
6273 	struct hclge_dev *hdev = vport->back;
6274 
6275 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
6276 }
6277 
6278 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6279 			      bool is_first)
6280 {
6281 	const unsigned char *new_addr = (const unsigned char *)p;
6282 	struct hclge_vport *vport = hclge_get_vport(handle);
6283 	struct hclge_dev *hdev = vport->back;
6284 	int ret;
6285 
6286 	/* mac addr check */
6287 	if (is_zero_ether_addr(new_addr) ||
6288 	    is_broadcast_ether_addr(new_addr) ||
6289 	    is_multicast_ether_addr(new_addr)) {
6290 		dev_err(&hdev->pdev->dev,
6291 			"Change uc mac err! invalid mac:%pM.\n",
6292 			 new_addr);
6293 		return -EINVAL;
6294 	}
6295 
6296 	if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6297 		dev_warn(&hdev->pdev->dev,
6298 			 "remove old uc mac address fail.\n");
6299 
6300 	ret = hclge_add_uc_addr(handle, new_addr);
6301 	if (ret) {
6302 		dev_err(&hdev->pdev->dev,
6303 			"add uc mac address fail, ret =%d.\n",
6304 			ret);
6305 
6306 		if (!is_first &&
6307 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6308 			dev_err(&hdev->pdev->dev,
6309 				"restore uc mac address fail.\n");
6310 
6311 		return -EIO;
6312 	}
6313 
6314 	ret = hclge_pause_addr_cfg(hdev, new_addr);
6315 	if (ret) {
6316 		dev_err(&hdev->pdev->dev,
6317 			"configure mac pause address fail, ret =%d.\n",
6318 			ret);
6319 		return -EIO;
6320 	}
6321 
6322 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6323 
6324 	return 0;
6325 }
6326 
6327 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6328 			  int cmd)
6329 {
6330 	struct hclge_vport *vport = hclge_get_vport(handle);
6331 	struct hclge_dev *hdev = vport->back;
6332 
6333 	if (!hdev->hw.mac.phydev)
6334 		return -EOPNOTSUPP;
6335 
6336 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6337 }
6338 
6339 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6340 				      u8 fe_type, bool filter_en, u8 vf_id)
6341 {
6342 	struct hclge_vlan_filter_ctrl_cmd *req;
6343 	struct hclge_desc desc;
6344 	int ret;
6345 
6346 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6347 
6348 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6349 	req->vlan_type = vlan_type;
6350 	req->vlan_fe = filter_en ? fe_type : 0;
6351 	req->vf_id = vf_id;
6352 
6353 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6354 	if (ret)
6355 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6356 			ret);
6357 
6358 	return ret;
6359 }
6360 
6361 #define HCLGE_FILTER_TYPE_VF		0
6362 #define HCLGE_FILTER_TYPE_PORT		1
6363 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
6364 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
6365 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
6366 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
6367 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
6368 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
6369 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
6370 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
6371 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
6372 
6373 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6374 {
6375 	struct hclge_vport *vport = hclge_get_vport(handle);
6376 	struct hclge_dev *hdev = vport->back;
6377 
6378 	if (hdev->pdev->revision >= 0x21) {
6379 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6380 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
6381 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6382 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
6383 	} else {
6384 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6385 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6386 					   0);
6387 	}
6388 	if (enable)
6389 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
6390 	else
6391 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6392 }
6393 
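/* Add or remove (is_kill) a VLAN id in the VF VLAN filter table. The target
 * function is selected through a two-descriptor VF bitmap indexed by vfid,
 * and the firmware response code is checked to distinguish success, a full
 * table or a missing entry.
 */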
6394 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6395 				    bool is_kill, u16 vlan, u8 qos,
6396 				    __be16 proto)
6397 {
6398 #define HCLGE_MAX_VF_BYTES  16
6399 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
6400 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
6401 	struct hclge_desc desc[2];
6402 	u8 vf_byte_val;
6403 	u8 vf_byte_off;
6404 	int ret;
6405 
6406 	hclge_cmd_setup_basic_desc(&desc[0],
6407 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6408 	hclge_cmd_setup_basic_desc(&desc[1],
6409 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6410 
6411 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6412 
6413 	vf_byte_off = vfid / 8;
6414 	vf_byte_val = 1 << (vfid % 8);
6415 
6416 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6417 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6418 
6419 	req0->vlan_id  = cpu_to_le16(vlan);
6420 	req0->vlan_cfg = is_kill;
6421 
6422 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6423 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6424 	else
6425 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6426 
6427 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
6428 	if (ret) {
6429 		dev_err(&hdev->pdev->dev,
6430 			"Send vf vlan command fail, ret =%d.\n",
6431 			ret);
6432 		return ret;
6433 	}
6434 
6435 	if (!is_kill) {
6436 #define HCLGE_VF_VLAN_NO_ENTRY	2
6437 		if (!req0->resp_code || req0->resp_code == 1)
6438 			return 0;
6439 
6440 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6441 			dev_warn(&hdev->pdev->dev,
6442 				 "vf vlan table is full, vf vlan filter is disabled\n");
6443 			return 0;
6444 		}
6445 
6446 		dev_err(&hdev->pdev->dev,
6447 			"Add vf vlan filter fail, ret =%d.\n",
6448 			req0->resp_code);
6449 	} else {
6450 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
6451 		if (!req0->resp_code)
6452 			return 0;
6453 
6454 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6455 			dev_warn(&hdev->pdev->dev,
6456 				 "vlan %d filter is not in vf vlan table\n",
6457 				 vlan);
6458 			return 0;
6459 		}
6460 
6461 		dev_err(&hdev->pdev->dev,
6462 			"Kill vf vlan filter fail, ret =%d.\n",
6463 			req0->resp_code);
6464 	}
6465 
6466 	return -EIO;
6467 }
6468 
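/* Add or remove a VLAN id in the port VLAN filter. The VLAN id is encoded as
 * a 160-VLAN block offset plus a byte/bit position inside that block.
 */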
6469 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6470 				      u16 vlan_id, bool is_kill)
6471 {
6472 	struct hclge_vlan_filter_pf_cfg_cmd *req;
6473 	struct hclge_desc desc;
6474 	u8 vlan_offset_byte_val;
6475 	u8 vlan_offset_byte;
6476 	u8 vlan_offset_160;
6477 	int ret;
6478 
6479 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6480 
6481 	vlan_offset_160 = vlan_id / 160;
6482 	vlan_offset_byte = (vlan_id % 160) / 8;
6483 	vlan_offset_byte_val = 1 << (vlan_id % 8);
6484 
6485 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6486 	req->vlan_offset = vlan_offset_160;
6487 	req->vlan_cfg = is_kill;
6488 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6489 
6490 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6491 	if (ret)
6492 		dev_err(&hdev->pdev->dev,
6493 			"port vlan command, send fail, ret =%d.\n", ret);
6494 	return ret;
6495 }
6496 
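/* Program a VLAN filter for one vport. The per-function (VF) table is always
 * updated; the port-level filter is only touched when the first vport joins
 * the VLAN or the last vport leaves it, as tracked in hdev->vlan_table.
 */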
6497 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6498 				    u16 vport_id, u16 vlan_id, u8 qos,
6499 				    bool is_kill)
6500 {
6501 	u16 vport_idx, vport_num = 0;
6502 	int ret;
6503 
6504 	if (is_kill && !vlan_id)
6505 		return 0;
6506 
6507 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6508 				       0, proto);
6509 	if (ret) {
6510 		dev_err(&hdev->pdev->dev,
6511 			"Set %d vport vlan filter config fail, ret =%d.\n",
6512 			vport_id, ret);
6513 		return ret;
6514 	}
6515 
6516 	/* vlan 0 may be added twice when 8021q module is enabled */
6517 	if (!is_kill && !vlan_id &&
6518 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
6519 		return 0;
6520 
6521 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6522 		dev_err(&hdev->pdev->dev,
6523 			"Add port vlan failed, vport %d is already in vlan %d\n",
6524 			vport_id, vlan_id);
6525 		return -EINVAL;
6526 	}
6527 
6528 	if (is_kill &&
6529 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6530 		dev_err(&hdev->pdev->dev,
6531 			"Delete port vlan failed, vport %d is not in vlan %d\n",
6532 			vport_id, vlan_id);
6533 		return -EINVAL;
6534 	}
6535 
6536 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6537 		vport_num++;
6538 
6539 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6540 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6541 						 is_kill);
6542 
6543 	return ret;
6544 }
6545 
6546 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6547 			  u16 vlan_id, bool is_kill)
6548 {
6549 	struct hclge_vport *vport = hclge_get_vport(handle);
6550 	struct hclge_dev *hdev = vport->back;
6551 
6552 	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6553 					0, is_kill);
6554 }
6555 
6556 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6557 				    u16 vlan, u8 qos, __be16 proto)
6558 {
6559 	struct hclge_vport *vport = hclge_get_vport(handle);
6560 	struct hclge_dev *hdev = vport->back;
6561 
6562 	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6563 		return -EINVAL;
6564 	if (proto != htons(ETH_P_8021Q))
6565 		return -EPROTONOSUPPORT;
6566 
6567 	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6568 }
6569 
6570 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6571 {
6572 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6573 	struct hclge_vport_vtag_tx_cfg_cmd *req;
6574 	struct hclge_dev *hdev = vport->back;
6575 	struct hclge_desc desc;
6576 	int status;
6577 
6578 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6579 
6580 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6581 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6582 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6583 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6584 		      vcfg->accept_tag1 ? 1 : 0);
6585 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6586 		      vcfg->accept_untag1 ? 1 : 0);
6587 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6588 		      vcfg->accept_tag2 ? 1 : 0);
6589 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6590 		      vcfg->accept_untag2 ? 1 : 0);
6591 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6592 		      vcfg->insert_tag1_en ? 1 : 0);
6593 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6594 		      vcfg->insert_tag2_en ? 1 : 0);
6595 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6596 
6597 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6598 	req->vf_bitmap[req->vf_offset] =
6599 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6600 
6601 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6602 	if (status)
6603 		dev_err(&hdev->pdev->dev,
6604 			"Send port txvlan cfg command fail, ret =%d\n",
6605 			status);
6606 
6607 	return status;
6608 }
6609 
6610 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6611 {
6612 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6613 	struct hclge_vport_vtag_rx_cfg_cmd *req;
6614 	struct hclge_dev *hdev = vport->back;
6615 	struct hclge_desc desc;
6616 	int status;
6617 
6618 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6619 
6620 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6621 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6622 		      vcfg->strip_tag1_en ? 1 : 0);
6623 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6624 		      vcfg->strip_tag2_en ? 1 : 0);
6625 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6626 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
6627 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6628 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
6629 
6630 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6631 	req->vf_bitmap[req->vf_offset] =
6632 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6633 
6634 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6635 	if (status)
6636 		dev_err(&hdev->pdev->dev,
6637 			"Send port rxvlan cfg command fail, ret =%d\n",
6638 			status);
6639 
6640 	return status;
6641 }
6642 
6643 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6644 {
6645 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6646 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6647 	struct hclge_desc desc;
6648 	int status;
6649 
6650 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6651 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6652 	rx_req->ot_fst_vlan_type =
6653 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6654 	rx_req->ot_sec_vlan_type =
6655 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6656 	rx_req->in_fst_vlan_type =
6657 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6658 	rx_req->in_sec_vlan_type =
6659 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6660 
6661 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6662 	if (status) {
6663 		dev_err(&hdev->pdev->dev,
6664 			"Send rxvlan protocol type command fail, ret =%d\n",
6665 			status);
6666 		return status;
6667 	}
6668 
6669 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6670 
6671 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6672 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6673 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6674 
6675 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6676 	if (status)
6677 		dev_err(&hdev->pdev->dev,
6678 			"Send txvlan protocol type command fail, ret =%d\n",
6679 			status);
6680 
6681 	return status;
6682 }
6683 
6684 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6685 {
6686 #define HCLGE_DEF_VLAN_TYPE		0x8100
6687 
6688 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6689 	struct hclge_vport *vport;
6690 	int ret;
6691 	int i;
6692 
6693 	if (hdev->pdev->revision >= 0x21) {
6694 		/* for revision 0x21, vf vlan filter is per function */
6695 		for (i = 0; i < hdev->num_alloc_vport; i++) {
6696 			vport = &hdev->vport[i];
6697 			ret = hclge_set_vlan_filter_ctrl(hdev,
6698 							 HCLGE_FILTER_TYPE_VF,
6699 							 HCLGE_FILTER_FE_EGRESS,
6700 							 true,
6701 							 vport->vport_id);
6702 			if (ret)
6703 				return ret;
6704 		}
6705 
6706 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6707 						 HCLGE_FILTER_FE_INGRESS, true,
6708 						 0);
6709 		if (ret)
6710 			return ret;
6711 	} else {
6712 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6713 						 HCLGE_FILTER_FE_EGRESS_V1_B,
6714 						 true, 0);
6715 		if (ret)
6716 			return ret;
6717 	}
6718 
6719 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
6720 
6721 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6722 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6723 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6724 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6725 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6726 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6727 
6728 	ret = hclge_set_vlan_protocol_type(hdev);
6729 	if (ret)
6730 		return ret;
6731 
6732 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6733 		vport = &hdev->vport[i];
6734 		vport->txvlan_cfg.accept_tag1 = true;
6735 		vport->txvlan_cfg.accept_untag1 = true;
6736 
6737 		/* accept_tag2 and accept_untag2 are not supported on
6738 		 * pdev revision(0x20); newer revisions support them. Setting
6739 		 * these two fields does not return an error when the driver
6740 		 * sends the command to the firmware on revision(0x20).
6741 		 * These two fields cannot be configured by the user.
6742 		 */
6743 		vport->txvlan_cfg.accept_tag2 = true;
6744 		vport->txvlan_cfg.accept_untag2 = true;
6745 
6746 		vport->txvlan_cfg.insert_tag1_en = false;
6747 		vport->txvlan_cfg.insert_tag2_en = false;
6748 		vport->txvlan_cfg.default_tag1 = 0;
6749 		vport->txvlan_cfg.default_tag2 = 0;
6750 
6751 		ret = hclge_set_vlan_tx_offload_cfg(vport);
6752 		if (ret)
6753 			return ret;
6754 
6755 		vport->rxvlan_cfg.strip_tag1_en = false;
6756 		vport->rxvlan_cfg.strip_tag2_en = true;
6757 		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6758 		vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6759 
6760 		ret = hclge_set_vlan_rx_offload_cfg(vport);
6761 		if (ret)
6762 			return ret;
6763 	}
6764 
6765 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6766 }
6767 
6768 void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
6769 {
6770 	struct hclge_vport_vlan_cfg *vlan;
6771 
6772 	/* vlan 0 is reserved */
6773 	if (!vlan_id)
6774 		return;
6775 
6776 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
6777 	if (!vlan)
6778 		return;
6779 
6780 	vlan->hd_tbl_status = true;
6781 	vlan->vlan_id = vlan_id;
6782 
6783 	list_add_tail(&vlan->node, &vport->vlan_list);
6784 }
6785 
6786 void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6787 			       bool is_write_tbl)
6788 {
6789 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6790 	struct hclge_dev *hdev = vport->back;
6791 
6792 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6793 		if (vlan->vlan_id == vlan_id) {
6794 			if (is_write_tbl && vlan->hd_tbl_status)
6795 				hclge_set_vlan_filter_hw(hdev,
6796 							 htons(ETH_P_8021Q),
6797 							 vport->vport_id,
6798 							 vlan_id, 0,
6799 							 true);
6800 
6801 			list_del(&vlan->node);
6802 			kfree(vlan);
6803 			break;
6804 		}
6805 	}
6806 }
6807 
6808 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
6809 {
6810 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6811 	struct hclge_dev *hdev = vport->back;
6812 
6813 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6814 		if (vlan->hd_tbl_status)
6815 			hclge_set_vlan_filter_hw(hdev,
6816 						 htons(ETH_P_8021Q),
6817 						 vport->vport_id,
6818 						 vlan->vlan_id, 0,
6819 						 true);
6820 
6821 		vlan->hd_tbl_status = false;
6822 		if (is_del_list) {
6823 			list_del(&vlan->node);
6824 			kfree(vlan);
6825 		}
6826 	}
6827 }
6828 
6829 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
6830 {
6831 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6832 	struct hclge_vport *vport;
6833 	int i;
6834 
6835 	mutex_lock(&hdev->vport_cfg_mutex);
6836 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6837 		vport = &hdev->vport[i];
6838 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6839 			list_del(&vlan->node);
6840 			kfree(vlan);
6841 		}
6842 	}
6843 	mutex_unlock(&hdev->vport_cfg_mutex);
6844 }
6845 
6846 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6847 {
6848 	struct hclge_vport *vport = hclge_get_vport(handle);
6849 
6850 	vport->rxvlan_cfg.strip_tag1_en = false;
6851 	vport->rxvlan_cfg.strip_tag2_en = enable;
6852 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6853 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6854 
6855 	return hclge_set_vlan_rx_offload_cfg(vport);
6856 }
6857 
6858 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6859 {
6860 	struct hclge_config_max_frm_size_cmd *req;
6861 	struct hclge_desc desc;
6862 
6863 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6864 
6865 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6866 	req->max_frm_size = cpu_to_le16(new_mps);
6867 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6868 
6869 	return hclge_cmd_send(&hdev->hw, &desc, 1);
6870 }
6871 
6872 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6873 {
6874 	struct hclge_vport *vport = hclge_get_vport(handle);
6875 
6876 	return hclge_set_vport_mtu(vport, new_mtu);
6877 }
6878 
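/* Set the MTU for a vport. The MTU is converted to a max frame size that
 * includes the Ethernet header, FCS and two VLAN tags. A VF only records its
 * own mps; the PF also reprograms the MAC and reallocates packet buffers
 * after checking that no VF exceeds the new size.
 */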
6879 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6880 {
6881 	struct hclge_dev *hdev = vport->back;
6882 	int i, max_frm_size, ret = 0;
6883 
6884 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6885 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6886 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
6887 		return -EINVAL;
6888 
6889 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6890 	mutex_lock(&hdev->vport_lock);
6891 	/* VF's mps must fit within hdev->mps */
6892 	if (vport->vport_id && max_frm_size > hdev->mps) {
6893 		mutex_unlock(&hdev->vport_lock);
6894 		return -EINVAL;
6895 	} else if (vport->vport_id) {
6896 		vport->mps = max_frm_size;
6897 		mutex_unlock(&hdev->vport_lock);
6898 		return 0;
6899 	}
6900 
6901 	/* PF's mps must not be less than any VF's mps */
6902 	for (i = 1; i < hdev->num_alloc_vport; i++)
6903 		if (max_frm_size < hdev->vport[i].mps) {
6904 			mutex_unlock(&hdev->vport_lock);
6905 			return -EINVAL;
6906 		}
6907 
6908 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6909 
6910 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
6911 	if (ret) {
6912 		dev_err(&hdev->pdev->dev,
6913 			"Change mtu fail, ret =%d\n", ret);
6914 		goto out;
6915 	}
6916 
6917 	hdev->mps = max_frm_size;
6918 	vport->mps = max_frm_size;
6919 
6920 	ret = hclge_buffer_alloc(hdev);
6921 	if (ret)
6922 		dev_err(&hdev->pdev->dev,
6923 			"Allocate buffer fail, ret =%d\n", ret);
6924 
6925 out:
6926 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6927 	mutex_unlock(&hdev->vport_lock);
6928 	return ret;
6929 }
6930 
6931 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6932 				    bool enable)
6933 {
6934 	struct hclge_reset_tqp_queue_cmd *req;
6935 	struct hclge_desc desc;
6936 	int ret;
6937 
6938 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6939 
6940 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6941 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6942 	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6943 
6944 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6945 	if (ret) {
6946 		dev_err(&hdev->pdev->dev,
6947 			"Send tqp reset cmd error, status =%d\n", ret);
6948 		return ret;
6949 	}
6950 
6951 	return 0;
6952 }
6953 
6954 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6955 {
6956 	struct hclge_reset_tqp_queue_cmd *req;
6957 	struct hclge_desc desc;
6958 	int ret;
6959 
6960 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6961 
6962 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6963 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6964 
6965 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6966 	if (ret) {
6967 		dev_err(&hdev->pdev->dev,
6968 			"Get reset status error, status =%d\n", ret);
6969 		return ret;
6970 	}
6971 
6972 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6973 }
6974 
6975 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
6976 {
6977 	struct hnae3_queue *queue;
6978 	struct hclge_tqp *tqp;
6979 
6980 	queue = handle->kinfo.tqp[queue_id];
6981 	tqp = container_of(queue, struct hclge_tqp, q);
6982 
6983 	return tqp->index;
6984 }
6985 
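/* Reset one TQP from the client side: disable the queue, assert the TQP
 * reset through the firmware, poll the reset status until the hardware
 * reports it is ready, then deassert the reset.
 */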
6986 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6987 {
6988 	struct hclge_vport *vport = hclge_get_vport(handle);
6989 	struct hclge_dev *hdev = vport->back;
6990 	int reset_try_times = 0;
6991 	int reset_status;
6992 	u16 queue_gid;
6993 	int ret = 0;
6994 
6995 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
6996 
6997 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6998 	if (ret) {
6999 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7000 		return ret;
7001 	}
7002 
7003 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7004 	if (ret) {
7005 		dev_err(&hdev->pdev->dev,
7006 			"Send reset tqp cmd fail, ret = %d\n", ret);
7007 		return ret;
7008 	}
7009 
7010 	reset_try_times = 0;
7011 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7012 		/* Wait for tqp hw reset */
7013 		msleep(20);
7014 		reset_status = hclge_get_reset_status(hdev, queue_gid);
7015 		if (reset_status)
7016 			break;
7017 	}
7018 
7019 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7020 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7021 		return -ETIME;
7022 	}
7023 
7024 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7025 	if (ret)
7026 		dev_err(&hdev->pdev->dev,
7027 			"Deassert the soft reset fail, ret = %d\n", ret);
7028 
7029 	return ret;
7030 }
7031 
7032 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7033 {
7034 	struct hclge_dev *hdev = vport->back;
7035 	int reset_try_times = 0;
7036 	int reset_status;
7037 	u16 queue_gid;
7038 	int ret;
7039 
7040 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7041 
7042 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7043 	if (ret) {
7044 		dev_warn(&hdev->pdev->dev,
7045 			 "Send reset tqp cmd fail, ret = %d\n", ret);
7046 		return;
7047 	}
7048 
7049 	reset_try_times = 0;
7050 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7051 		/* Wait for tqp hw reset */
7052 		msleep(20);
7053 		reset_status = hclge_get_reset_status(hdev, queue_gid);
7054 		if (reset_status)
7055 			break;
7056 	}
7057 
7058 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7059 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7060 		return;
7061 	}
7062 
7063 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7064 	if (ret)
7065 		dev_warn(&hdev->pdev->dev,
7066 			 "Deassert the soft reset fail, ret = %d\n", ret);
7067 }
7068 
7069 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7070 {
7071 	struct hclge_vport *vport = hclge_get_vport(handle);
7072 	struct hclge_dev *hdev = vport->back;
7073 
7074 	return hdev->fw_version;
7075 }
7076 
7077 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7078 {
7079 	struct phy_device *phydev = hdev->hw.mac.phydev;
7080 
7081 	if (!phydev)
7082 		return;
7083 
7084 	phy_set_asym_pause(phydev, rx_en, tx_en);
7085 }
7086 
7087 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7088 {
7089 	int ret;
7090 
7091 	if (rx_en && tx_en)
7092 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
7093 	else if (rx_en && !tx_en)
7094 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7095 	else if (!rx_en && tx_en)
7096 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7097 	else
7098 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
7099 
7100 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7101 		return 0;
7102 
7103 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7104 	if (ret) {
7105 		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7106 			ret);
7107 		return ret;
7108 	}
7109 
7110 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7111 
7112 	return 0;
7113 }
7114 
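/* Resolve the pause configuration after PHY autonegotiation by combining the
 * local and link-partner advertisements (half duplex forces pause off), then
 * apply the result through hclge_cfg_pauseparam.
 */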
7115 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7116 {
7117 	struct phy_device *phydev = hdev->hw.mac.phydev;
7118 	u16 remote_advertising = 0;
7119 	u16 local_advertising = 0;
7120 	u32 rx_pause, tx_pause;
7121 	u8 flowctl;
7122 
7123 	if (!phydev->link || !phydev->autoneg)
7124 		return 0;
7125 
7126 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7127 
7128 	if (phydev->pause)
7129 		remote_advertising = LPA_PAUSE_CAP;
7130 
7131 	if (phydev->asym_pause)
7132 		remote_advertising |= LPA_PAUSE_ASYM;
7133 
7134 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7135 					   remote_advertising);
7136 	tx_pause = flowctl & FLOW_CTRL_TX;
7137 	rx_pause = flowctl & FLOW_CTRL_RX;
7138 
7139 	if (phydev->duplex == HCLGE_MAC_HALF) {
7140 		tx_pause = 0;
7141 		rx_pause = 0;
7142 	}
7143 
7144 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7145 }
7146 
7147 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7148 				 u32 *rx_en, u32 *tx_en)
7149 {
7150 	struct hclge_vport *vport = hclge_get_vport(handle);
7151 	struct hclge_dev *hdev = vport->back;
7152 
7153 	*auto_neg = hclge_get_autoneg(handle);
7154 
7155 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7156 		*rx_en = 0;
7157 		*tx_en = 0;
7158 		return;
7159 	}
7160 
7161 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7162 		*rx_en = 1;
7163 		*tx_en = 0;
7164 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7165 		*tx_en = 1;
7166 		*rx_en = 0;
7167 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7168 		*rx_en = 1;
7169 		*tx_en = 1;
7170 	} else {
7171 		*rx_en = 0;
7172 		*tx_en = 0;
7173 	}
7174 }
7175 
7176 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7177 				u32 rx_en, u32 tx_en)
7178 {
7179 	struct hclge_vport *vport = hclge_get_vport(handle);
7180 	struct hclge_dev *hdev = vport->back;
7181 	struct phy_device *phydev = hdev->hw.mac.phydev;
7182 	u32 fc_autoneg;
7183 
7184 	fc_autoneg = hclge_get_autoneg(handle);
7185 	if (auto_neg != fc_autoneg) {
7186 		dev_info(&hdev->pdev->dev,
7187 			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7188 		return -EOPNOTSUPP;
7189 	}
7190 
7191 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7192 		dev_info(&hdev->pdev->dev,
7193 			 "Priority flow control enabled. Cannot set link flow control.\n");
7194 		return -EOPNOTSUPP;
7195 	}
7196 
7197 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7198 
7199 	if (!fc_autoneg)
7200 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7201 
7202 	/* Only support flow control negotiation for netdev with
7203 	 * phy attached for now.
7204 	 */
7205 	if (!phydev)
7206 		return -EOPNOTSUPP;
7207 
7208 	return phy_start_aneg(phydev);
7209 }
7210 
7211 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7212 					  u8 *auto_neg, u32 *speed, u8 *duplex)
7213 {
7214 	struct hclge_vport *vport = hclge_get_vport(handle);
7215 	struct hclge_dev *hdev = vport->back;
7216 
7217 	if (speed)
7218 		*speed = hdev->hw.mac.speed;
7219 	if (duplex)
7220 		*duplex = hdev->hw.mac.duplex;
7221 	if (auto_neg)
7222 		*auto_neg = hdev->hw.mac.autoneg;
7223 }
7224 
7225 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
7226 {
7227 	struct hclge_vport *vport = hclge_get_vport(handle);
7228 	struct hclge_dev *hdev = vport->back;
7229 
7230 	if (media_type)
7231 		*media_type = hdev->hw.mac.media_type;
7232 }
7233 
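/* Read the MDI/MDI-X control and status from the PHY: switch to the MDIX
 * register page, read the configured and resolved state, then restore the
 * copper page. ETH_TP_MDI_INVALID is reported when no PHY is attached, or
 * for the status while speed/duplex has not been resolved yet.
 */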
7234 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7235 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
7236 {
7237 	struct hclge_vport *vport = hclge_get_vport(handle);
7238 	struct hclge_dev *hdev = vport->back;
7239 	struct phy_device *phydev = hdev->hw.mac.phydev;
7240 	int mdix_ctrl, mdix, retval, is_resolved;
7241 
7242 	if (!phydev) {
7243 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7244 		*tp_mdix = ETH_TP_MDI_INVALID;
7245 		return;
7246 	}
7247 
7248 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7249 
7250 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7251 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7252 				    HCLGE_PHY_MDIX_CTRL_S);
7253 
7254 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7255 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7256 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7257 
7258 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7259 
7260 	switch (mdix_ctrl) {
7261 	case 0x0:
7262 		*tp_mdix_ctrl = ETH_TP_MDI;
7263 		break;
7264 	case 0x1:
7265 		*tp_mdix_ctrl = ETH_TP_MDI_X;
7266 		break;
7267 	case 0x3:
7268 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7269 		break;
7270 	default:
7271 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7272 		break;
7273 	}
7274 
7275 	if (!is_resolved)
7276 		*tp_mdix = ETH_TP_MDI_INVALID;
7277 	else if (mdix)
7278 		*tp_mdix = ETH_TP_MDI_X;
7279 	else
7280 		*tp_mdix = ETH_TP_MDI;
7281 }
7282 
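/* Bind a client (KNIC, UNIC or RoCE) to the PF vport and every VMDq vport.
 * For a KNIC client an already registered RoCE client is initialized as
 * well, provided the device supports RoCE; a RoCE client is only set up
 * once the NIC client is present.
 */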
7283 static int hclge_init_client_instance(struct hnae3_client *client,
7284 				      struct hnae3_ae_dev *ae_dev)
7285 {
7286 	struct hclge_dev *hdev = ae_dev->priv;
7287 	struct hclge_vport *vport;
7288 	int i, ret;
7289 
7290 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7291 		vport = &hdev->vport[i];
7292 
7293 		switch (client->type) {
7294 		case HNAE3_CLIENT_KNIC:
7295 
7296 			hdev->nic_client = client;
7297 			vport->nic.client = client;
7298 			ret = client->ops->init_instance(&vport->nic);
7299 			if (ret)
7300 				goto clear_nic;
7301 
7302 			hnae3_set_client_init_flag(client, ae_dev, 1);
7303 
7304 			if (hdev->roce_client &&
7305 			    hnae3_dev_roce_supported(hdev)) {
7306 				struct hnae3_client *rc = hdev->roce_client;
7307 
7308 				ret = hclge_init_roce_base_info(vport);
7309 				if (ret)
7310 					goto clear_roce;
7311 
7312 				ret = rc->ops->init_instance(&vport->roce);
7313 				if (ret)
7314 					goto clear_roce;
7315 
7316 				hnae3_set_client_init_flag(hdev->roce_client,
7317 							   ae_dev, 1);
7318 			}
7319 
7320 			break;
7321 		case HNAE3_CLIENT_UNIC:
7322 			hdev->nic_client = client;
7323 			vport->nic.client = client;
7324 
7325 			ret = client->ops->init_instance(&vport->nic);
7326 			if (ret)
7327 				goto clear_nic;
7328 
7329 			hnae3_set_client_init_flag(client, ae_dev, 1);
7330 
7331 			break;
7332 		case HNAE3_CLIENT_ROCE:
7333 			if (hnae3_dev_roce_supported(hdev)) {
7334 				hdev->roce_client = client;
7335 				vport->roce.client = client;
7336 			}
7337 
7338 			if (hdev->roce_client && hdev->nic_client) {
7339 				ret = hclge_init_roce_base_info(vport);
7340 				if (ret)
7341 					goto clear_roce;
7342 
7343 				ret = client->ops->init_instance(&vport->roce);
7344 				if (ret)
7345 					goto clear_roce;
7346 
7347 				hnae3_set_client_init_flag(client, ae_dev, 1);
7348 			}
7349 
7350 			break;
7351 		default:
7352 			return -EINVAL;
7353 		}
7354 	}
7355 
7356 	return 0;
7357 
7358 clear_nic:
7359 	hdev->nic_client = NULL;
7360 	vport->nic.client = NULL;
7361 	return ret;
7362 clear_roce:
7363 	hdev->roce_client = NULL;
7364 	vport->roce.client = NULL;
7365 	return ret;
7366 }
7367 
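/* Tear the client instances down in reverse order: the RoCE instance is
 * removed first, and the NIC instance is only removed when the caller is
 * not merely unregistering the RoCE client.
 */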
7368 static void hclge_uninit_client_instance(struct hnae3_client *client,
7369 					 struct hnae3_ae_dev *ae_dev)
7370 {
7371 	struct hclge_dev *hdev = ae_dev->priv;
7372 	struct hclge_vport *vport;
7373 	int i;
7374 
7375 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7376 		vport = &hdev->vport[i];
7377 		if (hdev->roce_client) {
7378 			hdev->roce_client->ops->uninit_instance(&vport->roce,
7379 								0);
7380 			hdev->roce_client = NULL;
7381 			vport->roce.client = NULL;
7382 		}
7383 		if (client->type == HNAE3_CLIENT_ROCE)
7384 			return;
7385 		if (hdev->nic_client && client->ops->uninit_instance) {
7386 			client->ops->uninit_instance(&vport->nic, 0);
7387 			hdev->nic_client = NULL;
7388 			vport->nic.client = NULL;
7389 		}
7390 	}
7391 }
7392 
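/* Enable the PCI device, set up DMA (64-bit mask with a 32-bit fallback),
 * request the regions, map BAR2 as the register base (hw->io_base) and
 * record how many VFs the device can expose.
 */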
7393 static int hclge_pci_init(struct hclge_dev *hdev)
7394 {
7395 	struct pci_dev *pdev = hdev->pdev;
7396 	struct hclge_hw *hw;
7397 	int ret;
7398 
7399 	ret = pci_enable_device(pdev);
7400 	if (ret) {
7401 		dev_err(&pdev->dev, "failed to enable PCI device\n");
7402 		return ret;
7403 	}
7404 
7405 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7406 	if (ret) {
7407 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7408 		if (ret) {
7409 			dev_err(&pdev->dev,
7410 				"can't set consistent PCI DMA\n");
7411 			goto err_disable_device;
7412 		}
7413 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7414 	}
7415 
7416 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7417 	if (ret) {
7418 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7419 		goto err_disable_device;
7420 	}
7421 
7422 	pci_set_master(pdev);
7423 	hw = &hdev->hw;
7424 	hw->io_base = pcim_iomap(pdev, 2, 0);
7425 	if (!hw->io_base) {
7426 		dev_err(&pdev->dev, "Can't map configuration register space\n");
7427 		ret = -ENOMEM;
7428 		goto err_clr_master;
7429 	}
7430 
7431 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7432 
7433 	return 0;
7434 err_clr_master:
7435 	pci_clear_master(pdev);
7436 	pci_release_regions(pdev);
7437 err_disable_device:
7438 	pci_disable_device(pdev);
7439 
7440 	return ret;
7441 }
7442 
7443 static void hclge_pci_uninit(struct hclge_dev *hdev)
7444 {
7445 	struct pci_dev *pdev = hdev->pdev;
7446 
7447 	pcim_iounmap(pdev, hdev->hw.io_base);
7448 	pci_free_irq_vectors(pdev);
7449 	pci_clear_master(pdev);
7450 	pci_release_mem_regions(pdev);
7451 	pci_disable_device(pdev);
7452 }
7453 
7454 static void hclge_state_init(struct hclge_dev *hdev)
7455 {
7456 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7457 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7458 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7459 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7460 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7461 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7462 }
7463 
7464 static void hclge_state_uninit(struct hclge_dev *hdev)
7465 {
7466 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7467 
7468 	if (hdev->service_timer.function)
7469 		del_timer_sync(&hdev->service_timer);
7470 	if (hdev->reset_timer.function)
7471 		del_timer_sync(&hdev->reset_timer);
7472 	if (hdev->service_task.func)
7473 		cancel_work_sync(&hdev->service_task);
7474 	if (hdev->rst_service_task.func)
7475 		cancel_work_sync(&hdev->rst_service_task);
7476 	if (hdev->mbx_service_task.func)
7477 		cancel_work_sync(&hdev->mbx_service_task);
7478 }
7479 
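/* Request an FLR-type reset through the reset path and wait for the reset
 * service task to bring the function down, polling for at most
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS (5 seconds).
 */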
7480 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7481 {
7482 #define HCLGE_FLR_WAIT_MS	100
7483 #define HCLGE_FLR_WAIT_CNT	50
7484 	struct hclge_dev *hdev = ae_dev->priv;
7485 	int cnt = 0;
7486 
7487 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7488 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7489 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7490 	hclge_reset_event(hdev->pdev, NULL);
7491 
7492 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7493 	       cnt++ < HCLGE_FLR_WAIT_CNT)
7494 		msleep(HCLGE_FLR_WAIT_MS);
7495 
7496 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7497 		dev_err(&hdev->pdev->dev,
7498 			"flr wait down timeout: %d\n", cnt);
7499 }
7500 
7501 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7502 {
7503 	struct hclge_dev *hdev = ae_dev->priv;
7504 
7505 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7506 }
7507 
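/* Main PF initialization path: allocate the hclge_dev, bring up PCI and
 * the command queue, query capabilities and configuration, set up
 * MSI/MSI-X and the misc vector, allocate and map TQPs and vports,
 * initialize MDIO (copper ports), MAC, VLAN, TM, RSS and the flow
 * director, enable hardware error interrupts and finally start the
 * service timers and work items.
 */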
7508 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7509 {
7510 	struct pci_dev *pdev = ae_dev->pdev;
7511 	struct hclge_dev *hdev;
7512 	int ret;
7513 
7514 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7515 	if (!hdev) {
7516 		ret = -ENOMEM;
7517 		goto out;
7518 	}
7519 
7520 	hdev->pdev = pdev;
7521 	hdev->ae_dev = ae_dev;
7522 	hdev->reset_type = HNAE3_NONE_RESET;
7523 	hdev->reset_level = HNAE3_FUNC_RESET;
7524 	ae_dev->priv = hdev;
7525 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7526 
7527 	mutex_init(&hdev->vport_lock);
7528 	mutex_init(&hdev->vport_cfg_mutex);
7529 
7530 	ret = hclge_pci_init(hdev);
7531 	if (ret) {
7532 		dev_err(&pdev->dev, "PCI init failed\n");
7533 		goto out;
7534 	}
7535 
7536 	/* Initialize the firmware command queue */
7537 	ret = hclge_cmd_queue_init(hdev);
7538 	if (ret) {
7539 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7540 		goto err_pci_uninit;
7541 	}
7542 
7543 	/* Initialize the firmware command interface */
7544 	ret = hclge_cmd_init(hdev);
7545 	if (ret)
7546 		goto err_cmd_uninit;
7547 
7548 	ret = hclge_get_cap(hdev);
7549 	if (ret) {
7550 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7551 			ret);
7552 		goto err_cmd_uninit;
7553 	}
7554 
7555 	ret = hclge_configure(hdev);
7556 	if (ret) {
7557 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7558 		goto err_cmd_uninit;
7559 	}
7560 
7561 	ret = hclge_init_msi(hdev);
7562 	if (ret) {
7563 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7564 		goto err_cmd_uninit;
7565 	}
7566 
7567 	ret = hclge_misc_irq_init(hdev);
7568 	if (ret) {
7569 		dev_err(&pdev->dev,
7570 			"Misc IRQ(vector0) init error, ret = %d.\n",
7571 			ret);
7572 		goto err_msi_uninit;
7573 	}
7574 
7575 	ret = hclge_alloc_tqps(hdev);
7576 	if (ret) {
7577 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7578 		goto err_msi_irq_uninit;
7579 	}
7580 
7581 	ret = hclge_alloc_vport(hdev);
7582 	if (ret) {
7583 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7584 		goto err_msi_irq_uninit;
7585 	}
7586 
7587 	ret = hclge_map_tqp(hdev);
7588 	if (ret) {
7589 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7590 		goto err_msi_irq_uninit;
7591 	}
7592 
7593 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7594 		ret = hclge_mac_mdio_config(hdev);
7595 		if (ret) {
7596 			dev_err(&hdev->pdev->dev,
7597 				"mdio config fail ret=%d\n", ret);
7598 			goto err_msi_irq_uninit;
7599 		}
7600 	}
7601 
7602 	ret = hclge_init_umv_space(hdev);
7603 	if (ret) {
7604 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7605 		goto err_mdiobus_unreg;
7606 	}
7607 
7608 	ret = hclge_mac_init(hdev);
7609 	if (ret) {
7610 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7611 		goto err_mdiobus_unreg;
7612 	}
7613 
7614 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7615 	if (ret) {
7616 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7617 		goto err_mdiobus_unreg;
7618 	}
7619 
7620 	ret = hclge_config_gro(hdev, true);
7621 	if (ret)
7622 		goto err_mdiobus_unreg;
7623 
7624 	ret = hclge_init_vlan_config(hdev);
7625 	if (ret) {
7626 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7627 		goto err_mdiobus_unreg;
7628 	}
7629 
7630 	ret = hclge_tm_schd_init(hdev);
7631 	if (ret) {
7632 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7633 		goto err_mdiobus_unreg;
7634 	}
7635 
7636 	hclge_rss_init_cfg(hdev);
7637 	ret = hclge_rss_init_hw(hdev);
7638 	if (ret) {
7639 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7640 		goto err_mdiobus_unreg;
7641 	}
7642 
7643 	ret = init_mgr_tbl(hdev);
7644 	if (ret) {
7645 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7646 		goto err_mdiobus_unreg;
7647 	}
7648 
7649 	ret = hclge_init_fd_config(hdev);
7650 	if (ret) {
7651 		dev_err(&pdev->dev,
7652 			"fd table init fail, ret=%d\n", ret);
7653 		goto err_mdiobus_unreg;
7654 	}
7655 
7656 	ret = hclge_hw_error_set_state(hdev, true);
7657 	if (ret) {
7658 		dev_err(&pdev->dev,
7659 			"fail(%d) to enable hw error interrupts\n", ret);
7660 		goto err_mdiobus_unreg;
7661 	}
7662 
7663 	hclge_dcb_ops_set(hdev);
7664 
7665 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7666 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7667 	INIT_WORK(&hdev->service_task, hclge_service_task);
7668 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7669 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7670 
7671 	hclge_clear_all_event_cause(hdev);
7672 
7673 	/* Enable MISC vector(vector0) */
7674 	hclge_enable_vector(&hdev->misc_vector, true);
7675 
7676 	hclge_state_init(hdev);
7677 	hdev->last_reset_time = jiffies;
7678 
7679 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7680 	return 0;
7681 
7682 err_mdiobus_unreg:
7683 	if (hdev->hw.mac.phydev)
7684 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
7685 err_msi_irq_uninit:
7686 	hclge_misc_irq_uninit(hdev);
7687 err_msi_uninit:
7688 	pci_free_irq_vectors(pdev);
7689 err_cmd_uninit:
7690 	hclge_cmd_uninit(hdev);
7691 err_pci_uninit:
7692 	pcim_iounmap(pdev, hdev->hw.io_base);
7693 	pci_clear_master(pdev);
7694 	pci_release_regions(pdev);
7695 	pci_disable_device(pdev);
7696 out:
7697 	return ret;
7698 }
7699 
7700 static void hclge_stats_clear(struct hclge_dev *hdev)
7701 {
7702 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7703 }
7704 
7705 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7706 {
7707 	struct hclge_vport *vport = hdev->vport;
7708 	int i;
7709 
7710 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7711 		hclge_vport_start(vport);
7712 		vport++;
7713 	}
7714 }
7715 
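/* Rebuild the hardware state after a reset, reusing the configuration kept
 * in software (vport/TQP layout, VLAN, TM, RSS, flow director), and
 * re-enable the hardware error interrupts that a core/global reset
 * disables.
 */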
7716 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7717 {
7718 	struct hclge_dev *hdev = ae_dev->priv;
7719 	struct pci_dev *pdev = ae_dev->pdev;
7720 	int ret;
7721 
7722 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7723 
7724 	hclge_stats_clear(hdev);
7725 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7726 
7727 	ret = hclge_cmd_init(hdev);
7728 	if (ret) {
7729 		dev_err(&pdev->dev, "Cmd queue init failed\n");
7730 		return ret;
7731 	}
7732 
7733 	ret = hclge_map_tqp(hdev);
7734 	if (ret) {
7735 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7736 		return ret;
7737 	}
7738 
7739 	hclge_reset_umv_space(hdev);
7740 
7741 	ret = hclge_mac_init(hdev);
7742 	if (ret) {
7743 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7744 		return ret;
7745 	}
7746 
7747 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7748 	if (ret) {
7749 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7750 		return ret;
7751 	}
7752 
7753 	ret = hclge_config_gro(hdev, true);
7754 	if (ret)
7755 		return ret;
7756 
7757 	ret = hclge_init_vlan_config(hdev);
7758 	if (ret) {
7759 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7760 		return ret;
7761 	}
7762 
7763 	ret = hclge_tm_init_hw(hdev, true);
7764 	if (ret) {
7765 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
7766 		return ret;
7767 	}
7768 
7769 	ret = hclge_rss_init_hw(hdev);
7770 	if (ret) {
7771 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7772 		return ret;
7773 	}
7774 
7775 	ret = hclge_init_fd_config(hdev);
7776 	if (ret) {
7777 		dev_err(&pdev->dev,
7778 			"fd table init fail, ret=%d\n", ret);
7779 		return ret;
7780 	}
7781 
7782 	/* Re-enable the hw error interrupts because
7783 	 * the interrupts get disabled on core/global reset.
7784 	 */
7785 	ret = hclge_hw_error_set_state(hdev, true);
7786 	if (ret) {
7787 		dev_err(&pdev->dev,
7788 			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
7789 		return ret;
7790 	}
7791 
7792 	hclge_reset_vport_state(hdev);
7793 
7794 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7795 		 HCLGE_DRIVER_NAME);
7796 
7797 	return 0;
7798 }
7799 
7800 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7801 {
7802 	struct hclge_dev *hdev = ae_dev->priv;
7803 	struct hclge_mac *mac = &hdev->hw.mac;
7804 
7805 	hclge_state_uninit(hdev);
7806 
7807 	if (mac->phydev)
7808 		mdiobus_unregister(mac->mdio_bus);
7809 
7810 	hclge_uninit_umv_space(hdev);
7811 
7812 	/* Disable MISC vector(vector0) */
7813 	hclge_enable_vector(&hdev->misc_vector, false);
7814 	synchronize_irq(hdev->misc_vector.vector_irq);
7815 
7816 	hclge_hw_error_set_state(hdev, false);
7817 	hclge_cmd_uninit(hdev);
7818 	hclge_misc_irq_uninit(hdev);
7819 	hclge_pci_uninit(hdev);
7820 	mutex_destroy(&hdev->vport_lock);
7821 	hclge_uninit_vport_mac_table(hdev);
7822 	hclge_uninit_vport_vlan_table(hdev);
7823 	mutex_destroy(&hdev->vport_cfg_mutex);
7824 	ae_dev->priv = NULL;
7825 }
7826 
7827 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7828 {
7829 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7830 	struct hclge_vport *vport = hclge_get_vport(handle);
7831 	struct hclge_dev *hdev = vport->back;
7832 
7833 	return min_t(u32, hdev->rss_size_max,
7834 		     vport->alloc_tqps / kinfo->num_tc);
7835 }
7836 
7837 static void hclge_get_channels(struct hnae3_handle *handle,
7838 			       struct ethtool_channels *ch)
7839 {
7840 	ch->max_combined = hclge_get_max_channels(handle);
7841 	ch->other_count = 1;
7842 	ch->max_other = 1;
7843 	ch->combined_count = handle->kinfo.rss_size;
7844 }
7845 
7846 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7847 					u16 *alloc_tqps, u16 *max_rss_size)
7848 {
7849 	struct hclge_vport *vport = hclge_get_vport(handle);
7850 	struct hclge_dev *hdev = vport->back;
7851 
7852 	*alloc_tqps = vport->alloc_tqps;
7853 	*max_rss_size = hdev->rss_size_max;
7854 }
7855 
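/* ethtool channel handler, typically reached via "ethtool -L <dev>
 * combined N". Store the requested queue count, update the vport TQP
 * mapping, reprogram the RSS TC mode for the resulting rss_size and,
 * unless the user has configured the RSS indirection table, rebuild it
 * round-robin over the new rss_size.
 */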
7856 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
7857 			      bool rxfh_configured)
7858 {
7859 	struct hclge_vport *vport = hclge_get_vport(handle);
7860 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7861 	struct hclge_dev *hdev = vport->back;
7862 	int cur_rss_size = kinfo->rss_size;
7863 	int cur_tqps = kinfo->num_tqps;
7864 	u16 tc_offset[HCLGE_MAX_TC_NUM];
7865 	u16 tc_valid[HCLGE_MAX_TC_NUM];
7866 	u16 tc_size[HCLGE_MAX_TC_NUM];
7867 	u16 roundup_size;
7868 	u32 *rss_indir;
7869 	int ret, i;
7870 
7871 	kinfo->req_rss_size = new_tqps_num;
7872 
7873 	ret = hclge_tm_vport_map_update(hdev);
7874 	if (ret) {
7875 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
7876 		return ret;
7877 	}
7878 
7879 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
7880 	roundup_size = ilog2(roundup_size);
7881 	/* Set the RSS TC mode according to the new RSS size */
7882 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7883 		tc_valid[i] = 0;
7884 
7885 		if (!(hdev->hw_tc_map & BIT(i)))
7886 			continue;
7887 
7888 		tc_valid[i] = 1;
7889 		tc_size[i] = roundup_size;
7890 		tc_offset[i] = kinfo->rss_size * i;
7891 	}
7892 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7893 	if (ret)
7894 		return ret;
7895 
7896 	/* RSS indirection table has been configured by the user */
7897 	if (rxfh_configured)
7898 		goto out;
7899 
7900 	/* Reinitialize the RSS indirection table for the new RSS size */
7901 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7902 	if (!rss_indir)
7903 		return -ENOMEM;
7904 
7905 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7906 		rss_indir[i] = i % kinfo->rss_size;
7907 
7908 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7909 	if (ret)
7910 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7911 			ret);
7912 
7913 	kfree(rss_indir);
7914 
7915 out:
7916 	if (!ret)
7917 		dev_info(&hdev->pdev->dev,
7918 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
7919 			 cur_rss_size, kinfo->rss_size,
7920 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
7921 
7922 	return ret;
7923 }
7924 
7925 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7926 			      u32 *regs_num_64_bit)
7927 {
7928 	struct hclge_desc desc;
7929 	u32 total_num;
7930 	int ret;
7931 
7932 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
7933 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7934 	if (ret) {
7935 		dev_err(&hdev->pdev->dev,
7936 			"Query register number cmd failed, ret = %d.\n", ret);
7937 		return ret;
7938 	}
7939 
7940 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
7941 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
7942 
7943 	total_num = *regs_num_32_bit + *regs_num_64_bit;
7944 	if (!total_num)
7945 		return -EINVAL;
7946 
7947 	return 0;
7948 }
7949 
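/* Fetch the 32 bit register dump from the firmware. The values are spread
 * over several descriptors; the first descriptor carries two data words
 * fewer than the others, which is why the register count is biased by 2
 * before rounding up to a descriptor count.
 */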
7950 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7951 				 void *data)
7952 {
7953 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
7954 
7955 	struct hclge_desc *desc;
7956 	u32 *reg_val = data;
7957 	__le32 *desc_data;
7958 	int cmd_num;
7959 	int i, k, n;
7960 	int ret;
7961 
7962 	if (regs_num == 0)
7963 		return 0;
7964 
7965 	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
7966 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7967 	if (!desc)
7968 		return -ENOMEM;
7969 
7970 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
7971 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7972 	if (ret) {
7973 		dev_err(&hdev->pdev->dev,
7974 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
7975 		kfree(desc);
7976 		return ret;
7977 	}
7978 
7979 	for (i = 0; i < cmd_num; i++) {
7980 		if (i == 0) {
7981 			desc_data = (__le32 *)(&desc[i].data[0]);
7982 			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
7983 		} else {
7984 			desc_data = (__le32 *)(&desc[i]);
7985 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
7986 		}
7987 		for (k = 0; k < n; k++) {
7988 			*reg_val++ = le32_to_cpu(*desc_data++);
7989 
7990 			regs_num--;
7991 			if (!regs_num)
7992 				break;
7993 		}
7994 	}
7995 
7996 	kfree(desc);
7997 	return 0;
7998 }
7999 
8000 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8001 				 void *data)
8002 {
8003 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8004 
8005 	struct hclge_desc *desc;
8006 	u64 *reg_val = data;
8007 	__le64 *desc_data;
8008 	int cmd_num;
8009 	int i, k, n;
8010 	int ret;
8011 
8012 	if (regs_num == 0)
8013 		return 0;
8014 
8015 	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8016 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8017 	if (!desc)
8018 		return -ENOMEM;
8019 
8020 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8021 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8022 	if (ret) {
8023 		dev_err(&hdev->pdev->dev,
8024 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
8025 		kfree(desc);
8026 		return ret;
8027 	}
8028 
8029 	for (i = 0; i < cmd_num; i++) {
8030 		if (i == 0) {
8031 			desc_data = (__le64 *)(&desc[i].data[0]);
8032 			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8033 		} else {
8034 			desc_data = (__le64 *)(&desc[i]);
8035 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
8036 		}
8037 		for (k = 0; k < n; k++) {
8038 			*reg_val++ = le64_to_cpu(*desc_data++);
8039 
8040 			regs_num--;
8041 			if (!regs_num)
8042 				break;
8043 		}
8044 	}
8045 
8046 	kfree(desc);
8047 	return 0;
8048 }
8049 
8050 #define MAX_SEPARATE_NUM	4
8051 #define SEPARATOR_VALUE		0xFFFFFFFF
8052 #define REG_NUM_PER_LINE	4
8053 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
8054 
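/* The register dump is organised in lines of REG_NUM_PER_LINE u32 values.
 * Each directly read block (cmdq, common, per-ring and per-TQP-vector
 * registers) is followed by SEPARATOR_VALUE padding so that it ends on a
 * line boundary, and the 32/64 bit dumps obtained from the firmware are
 * appended at the end. The length reported here has to cover everything
 * hclge_get_regs() writes.
 */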
8055 static int hclge_get_regs_len(struct hnae3_handle *handle)
8056 {
8057 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8058 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8059 	struct hclge_vport *vport = hclge_get_vport(handle);
8060 	struct hclge_dev *hdev = vport->back;
8061 	u32 regs_num_32_bit, regs_num_64_bit;
8062 	int ret;
8063 
8064 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8065 	if (ret) {
8066 		dev_err(&hdev->pdev->dev,
8067 			"Get register number failed, ret = %d.\n", ret);
8068 		return -EOPNOTSUPP;
8069 	}
8070 
8071 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8072 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8073 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8074 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8075 
8076 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8077 		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8078 		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8079 }
8080 
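/* Dump the registers: the direct blocks are read from the PF register
 * space (ring registers at a 0x200 stride per TQP, TQP interrupt registers
 * at a 4 byte stride per vector), then the 32 bit and 64 bit dumps are
 * requested from the firmware.
 */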
8081 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8082 			   void *data)
8083 {
8084 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8085 	struct hclge_vport *vport = hclge_get_vport(handle);
8086 	struct hclge_dev *hdev = vport->back;
8087 	u32 regs_num_32_bit, regs_num_64_bit;
8088 	int i, j, reg_um, separator_num;
8089 	u32 *reg = data;
8090 	int ret;
8091 
8092 	*version = hdev->fw_version;
8093 
8094 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8095 	if (ret) {
8096 		dev_err(&hdev->pdev->dev,
8097 			"Get register number failed, ret = %d.\n", ret);
8098 		return;
8099 	}
8100 
8101 	/* fetch per-PF register values from the PF PCIe register space */
8102 	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8103 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8104 	for (i = 0; i < reg_um; i++)
8105 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8106 	for (i = 0; i < separator_num; i++)
8107 		*reg++ = SEPARATOR_VALUE;
8108 
8109 	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
8110 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8111 	for (i = 0; i < reg_um; i++)
8112 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8113 	for (i = 0; i < separator_num; i++)
8114 		*reg++ = SEPARATOR_VALUE;
8115 
8116 	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
8117 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8118 	for (j = 0; j < kinfo->num_tqps; j++) {
8119 		for (i = 0; i < reg_um; i++)
8120 			*reg++ = hclge_read_dev(&hdev->hw,
8121 						ring_reg_addr_list[i] +
8122 						0x200 * j);
8123 		for (i = 0; i < separator_num; i++)
8124 			*reg++ = SEPARATOR_VALUE;
8125 	}
8126 
8127 	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8128 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8129 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
8130 		for (i = 0; i < reg_um; i++)
8131 			*reg++ = hclge_read_dev(&hdev->hw,
8132 						tqp_intr_reg_addr_list[i] +
8133 						4 * j);
8134 		for (i = 0; i < separator_num; i++)
8135 			*reg++ = SEPARATOR_VALUE;
8136 	}
8137 
8138 	/* fetch PF common register values from the firmware */
8139 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
8140 	if (ret) {
8141 		dev_err(&hdev->pdev->dev,
8142 			"Get 32 bit register failed, ret = %d.\n", ret);
8143 		return;
8144 	}
8145 
8146 	reg += regs_num_32_bit;
8147 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
8148 	if (ret)
8149 		dev_err(&hdev->pdev->dev,
8150 			"Get 64 bit register failed, ret = %d.\n", ret);
8151 }
8152 
8153 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
8154 {
8155 	struct hclge_set_led_state_cmd *req;
8156 	struct hclge_desc desc;
8157 	int ret;
8158 
8159 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8160 
8161 	req = (struct hclge_set_led_state_cmd *)desc.data;
8162 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8163 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
8164 
8165 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8166 	if (ret)
8167 		dev_err(&hdev->pdev->dev,
8168 			"Send set led state cmd error, ret =%d\n", ret);
8169 
8170 	return ret;
8171 }
8172 
8173 enum hclge_led_status {
8174 	HCLGE_LED_OFF,
8175 	HCLGE_LED_ON,
8176 	HCLGE_LED_NO_CHANGE = 0xFF,
8177 };
8178 
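/* Identify-LED handler, typically triggered by "ethtool -p <dev>": the
 * locate LED is switched on while identification is active and switched
 * off again afterwards.
 */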
8179 static int hclge_set_led_id(struct hnae3_handle *handle,
8180 			    enum ethtool_phys_id_state status)
8181 {
8182 	struct hclge_vport *vport = hclge_get_vport(handle);
8183 	struct hclge_dev *hdev = vport->back;
8184 
8185 	switch (status) {
8186 	case ETHTOOL_ID_ACTIVE:
8187 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
8188 	case ETHTOOL_ID_INACTIVE:
8189 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
8190 	default:
8191 		return -EINVAL;
8192 	}
8193 }
8194 
8195 static void hclge_get_link_mode(struct hnae3_handle *handle,
8196 				unsigned long *supported,
8197 				unsigned long *advertising)
8198 {
8199 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8200 	struct hclge_vport *vport = hclge_get_vport(handle);
8201 	struct hclge_dev *hdev = vport->back;
8202 	unsigned int idx = 0;
8203 
8204 	for (; idx < size; idx++) {
8205 		supported[idx] = hdev->hw.mac.supported[idx];
8206 		advertising[idx] = hdev->hw.mac.advertising[idx];
8207 	}
8208 }
8209 
8210 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
8211 {
8212 	struct hclge_vport *vport = hclge_get_vport(handle);
8213 	struct hclge_dev *hdev = vport->back;
8214 
8215 	return hclge_config_gro(hdev, enable);
8216 }
8217 
8218 static const struct hnae3_ae_ops hclge_ops = {
8219 	.init_ae_dev = hclge_init_ae_dev,
8220 	.uninit_ae_dev = hclge_uninit_ae_dev,
8221 	.flr_prepare = hclge_flr_prepare,
8222 	.flr_done = hclge_flr_done,
8223 	.init_client_instance = hclge_init_client_instance,
8224 	.uninit_client_instance = hclge_uninit_client_instance,
8225 	.map_ring_to_vector = hclge_map_ring_to_vector,
8226 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
8227 	.get_vector = hclge_get_vector,
8228 	.put_vector = hclge_put_vector,
8229 	.set_promisc_mode = hclge_set_promisc_mode,
8230 	.set_loopback = hclge_set_loopback,
8231 	.start = hclge_ae_start,
8232 	.stop = hclge_ae_stop,
8233 	.client_start = hclge_client_start,
8234 	.client_stop = hclge_client_stop,
8235 	.get_status = hclge_get_status,
8236 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
8237 	.update_speed_duplex_h = hclge_update_speed_duplex_h,
8238 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8239 	.get_media_type = hclge_get_media_type,
8240 	.get_rss_key_size = hclge_get_rss_key_size,
8241 	.get_rss_indir_size = hclge_get_rss_indir_size,
8242 	.get_rss = hclge_get_rss,
8243 	.set_rss = hclge_set_rss,
8244 	.set_rss_tuple = hclge_set_rss_tuple,
8245 	.get_rss_tuple = hclge_get_rss_tuple,
8246 	.get_tc_size = hclge_get_tc_size,
8247 	.get_mac_addr = hclge_get_mac_addr,
8248 	.set_mac_addr = hclge_set_mac_addr,
8249 	.do_ioctl = hclge_do_ioctl,
8250 	.add_uc_addr = hclge_add_uc_addr,
8251 	.rm_uc_addr = hclge_rm_uc_addr,
8252 	.add_mc_addr = hclge_add_mc_addr,
8253 	.rm_mc_addr = hclge_rm_mc_addr,
8254 	.set_autoneg = hclge_set_autoneg,
8255 	.get_autoneg = hclge_get_autoneg,
8256 	.get_pauseparam = hclge_get_pauseparam,
8257 	.set_pauseparam = hclge_set_pauseparam,
8258 	.set_mtu = hclge_set_mtu,
8259 	.reset_queue = hclge_reset_tqp,
8260 	.get_stats = hclge_get_stats,
8261 	.update_stats = hclge_update_stats,
8262 	.get_strings = hclge_get_strings,
8263 	.get_sset_count = hclge_get_sset_count,
8264 	.get_fw_version = hclge_get_fw_version,
8265 	.get_mdix_mode = hclge_get_mdix_mode,
8266 	.enable_vlan_filter = hclge_enable_vlan_filter,
8267 	.set_vlan_filter = hclge_set_vlan_filter,
8268 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8269 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8270 	.reset_event = hclge_reset_event,
8271 	.set_default_reset_request = hclge_set_def_reset_request,
8272 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8273 	.set_channels = hclge_set_channels,
8274 	.get_channels = hclge_get_channels,
8275 	.get_regs_len = hclge_get_regs_len,
8276 	.get_regs = hclge_get_regs,
8277 	.set_led_id = hclge_set_led_id,
8278 	.get_link_mode = hclge_get_link_mode,
8279 	.add_fd_entry = hclge_add_fd_entry,
8280 	.del_fd_entry = hclge_del_fd_entry,
8281 	.del_all_fd_entries = hclge_del_all_fd_entries,
8282 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8283 	.get_fd_rule_info = hclge_get_fd_rule_info,
8284 	.get_fd_all_rules = hclge_get_all_rules,
8285 	.restore_fd_rules = hclge_restore_fd_entries,
8286 	.enable_fd = hclge_enable_fd,
8287 	.dbg_run_cmd = hclge_dbg_run_cmd,
8288 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
8289 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
8290 	.ae_dev_resetting = hclge_ae_dev_resetting,
8291 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8292 	.set_gro_en = hclge_gro_en,
8293 	.get_global_queue_id = hclge_covert_handle_qid_global,
8294 	.set_timer_task = hclge_set_timer_task,
8295 	.mac_connect_phy = hclge_mac_connect_phy,
8296 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
8297 };
8298 
8299 static struct hnae3_ae_algo ae_algo = {
8300 	.ops = &hclge_ops,
8301 	.pdev_id_table = ae_algo_pci_tbl,
8302 };
8303 
8304 static int hclge_init(void)
8305 {
8306 	pr_info("%s is initializing\n", HCLGE_NAME);
8307 
8308 	hnae3_register_ae_algo(&ae_algo);
8309 
8310 	return 0;
8311 }
8312 
8313 static void hclge_exit(void)
8314 {
8315 	hnae3_unregister_ae_algo(&ae_algo);
8316 }
8317 module_init(hclge_init);
8318 module_exit(hclge_exit);
8319 
8320 MODULE_LICENSE("GPL");
8321 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8322 MODULE_DESCRIPTION("HCLGE Driver");
8323 MODULE_VERSION(HCLGE_MOD_VERSION);
8324