1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256
31 
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37 			       u16 *allocated_size, bool is_alloc);
38 
39 static struct hnae3_ae_algo ae_algo;
40 
41 static const struct pci_device_id ae_algo_pci_tbl[] = {
42 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49 	/* required last entry */
50 	{0, }
51 };
52 
53 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
54 
55 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
56 					 HCLGE_CMDQ_TX_ADDR_H_REG,
57 					 HCLGE_CMDQ_TX_DEPTH_REG,
58 					 HCLGE_CMDQ_TX_TAIL_REG,
59 					 HCLGE_CMDQ_TX_HEAD_REG,
60 					 HCLGE_CMDQ_RX_ADDR_L_REG,
61 					 HCLGE_CMDQ_RX_ADDR_H_REG,
62 					 HCLGE_CMDQ_RX_DEPTH_REG,
63 					 HCLGE_CMDQ_RX_TAIL_REG,
64 					 HCLGE_CMDQ_RX_HEAD_REG,
65 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
66 					 HCLGE_CMDQ_INTR_STS_REG,
67 					 HCLGE_CMDQ_INTR_EN_REG,
68 					 HCLGE_CMDQ_INTR_GEN_REG};
69 
70 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
71 					   HCLGE_VECTOR0_OTER_EN_REG,
72 					   HCLGE_MISC_RESET_STS_REG,
73 					   HCLGE_MISC_VECTOR_INT_STS,
74 					   HCLGE_GLOBAL_RESET_REG,
75 					   HCLGE_FUN_RST_ING,
76 					   HCLGE_GRO_EN_REG};
77 
78 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
79 					 HCLGE_RING_RX_ADDR_H_REG,
80 					 HCLGE_RING_RX_BD_NUM_REG,
81 					 HCLGE_RING_RX_BD_LENGTH_REG,
82 					 HCLGE_RING_RX_MERGE_EN_REG,
83 					 HCLGE_RING_RX_TAIL_REG,
84 					 HCLGE_RING_RX_HEAD_REG,
85 					 HCLGE_RING_RX_FBD_NUM_REG,
86 					 HCLGE_RING_RX_OFFSET_REG,
87 					 HCLGE_RING_RX_FBD_OFFSET_REG,
88 					 HCLGE_RING_RX_STASH_REG,
89 					 HCLGE_RING_RX_BD_ERR_REG,
90 					 HCLGE_RING_TX_ADDR_L_REG,
91 					 HCLGE_RING_TX_ADDR_H_REG,
92 					 HCLGE_RING_TX_BD_NUM_REG,
93 					 HCLGE_RING_TX_PRIORITY_REG,
94 					 HCLGE_RING_TX_TC_REG,
95 					 HCLGE_RING_TX_MERGE_EN_REG,
96 					 HCLGE_RING_TX_TAIL_REG,
97 					 HCLGE_RING_TX_HEAD_REG,
98 					 HCLGE_RING_TX_FBD_NUM_REG,
99 					 HCLGE_RING_TX_OFFSET_REG,
100 					 HCLGE_RING_TX_EBD_NUM_REG,
101 					 HCLGE_RING_TX_EBD_OFFSET_REG,
102 					 HCLGE_RING_TX_BD_ERR_REG,
103 					 HCLGE_RING_EN_REG};
104 
105 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
106 					     HCLGE_TQP_INTR_GL0_REG,
107 					     HCLGE_TQP_INTR_GL1_REG,
108 					     HCLGE_TQP_INTR_GL2_REG,
109 					     HCLGE_TQP_INTR_RL_REG};
110 
111 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
112 	"App    Loopback test",
113 	"Serdes serial Loopback test",
114 	"Serdes parallel Loopback test",
115 	"Phy    Loopback test"
116 };
117 
118 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
119 	{"mac_tx_mac_pause_num",
120 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
121 	{"mac_rx_mac_pause_num",
122 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
123 	{"mac_tx_control_pkt_num",
124 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
125 	{"mac_rx_control_pkt_num",
126 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
127 	{"mac_tx_pfc_pkt_num",
128 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
129 	{"mac_tx_pfc_pri0_pkt_num",
130 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
131 	{"mac_tx_pfc_pri1_pkt_num",
132 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
133 	{"mac_tx_pfc_pri2_pkt_num",
134 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
135 	{"mac_tx_pfc_pri3_pkt_num",
136 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
137 	{"mac_tx_pfc_pri4_pkt_num",
138 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
139 	{"mac_tx_pfc_pri5_pkt_num",
140 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
141 	{"mac_tx_pfc_pri6_pkt_num",
142 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
143 	{"mac_tx_pfc_pri7_pkt_num",
144 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
145 	{"mac_rx_pfc_pkt_num",
146 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
147 	{"mac_rx_pfc_pri0_pkt_num",
148 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
149 	{"mac_rx_pfc_pri1_pkt_num",
150 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
151 	{"mac_rx_pfc_pri2_pkt_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
153 	{"mac_rx_pfc_pri3_pkt_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
155 	{"mac_rx_pfc_pri4_pkt_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
157 	{"mac_rx_pfc_pri5_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
159 	{"mac_rx_pfc_pri6_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
161 	{"mac_rx_pfc_pri7_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
163 	{"mac_tx_total_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
165 	{"mac_tx_total_oct_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
167 	{"mac_tx_good_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
169 	{"mac_tx_bad_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
171 	{"mac_tx_good_oct_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
173 	{"mac_tx_bad_oct_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
175 	{"mac_tx_uni_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
177 	{"mac_tx_multi_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
179 	{"mac_tx_broad_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
181 	{"mac_tx_undersize_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
183 	{"mac_tx_oversize_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
185 	{"mac_tx_64_oct_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
187 	{"mac_tx_65_127_oct_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
189 	{"mac_tx_128_255_oct_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
191 	{"mac_tx_256_511_oct_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
193 	{"mac_tx_512_1023_oct_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
195 	{"mac_tx_1024_1518_oct_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
197 	{"mac_tx_1519_2047_oct_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
199 	{"mac_tx_2048_4095_oct_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
201 	{"mac_tx_4096_8191_oct_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
203 	{"mac_tx_8192_9216_oct_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
205 	{"mac_tx_9217_12287_oct_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
207 	{"mac_tx_12288_16383_oct_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
209 	{"mac_tx_1519_max_good_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
211 	{"mac_tx_1519_max_bad_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
213 	{"mac_rx_total_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
215 	{"mac_rx_total_oct_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
217 	{"mac_rx_good_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
219 	{"mac_rx_bad_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
221 	{"mac_rx_good_oct_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
223 	{"mac_rx_bad_oct_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
225 	{"mac_rx_uni_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
227 	{"mac_rx_multi_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
229 	{"mac_rx_broad_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
231 	{"mac_rx_undersize_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
233 	{"mac_rx_oversize_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
235 	{"mac_rx_64_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
237 	{"mac_rx_65_127_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
239 	{"mac_rx_128_255_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
241 	{"mac_rx_256_511_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
243 	{"mac_rx_512_1023_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
245 	{"mac_rx_1024_1518_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
247 	{"mac_rx_1519_2047_oct_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
249 	{"mac_rx_2048_4095_oct_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
251 	{"mac_rx_4096_8191_oct_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
253 	{"mac_rx_8192_9216_oct_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
255 	{"mac_rx_9217_12287_oct_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
257 	{"mac_rx_12288_16383_oct_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
259 	{"mac_rx_1519_max_good_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
261 	{"mac_rx_1519_max_bad_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
263 
264 	{"mac_tx_fragment_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
266 	{"mac_tx_undermin_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
268 	{"mac_tx_jabber_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
270 	{"mac_tx_err_all_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
272 	{"mac_tx_from_app_good_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
274 	{"mac_tx_from_app_bad_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
276 	{"mac_rx_fragment_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
278 	{"mac_rx_undermin_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
280 	{"mac_rx_jabber_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
282 	{"mac_rx_fcs_err_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
284 	{"mac_rx_send_app_good_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
286 	{"mac_rx_send_app_bad_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
288 };
289 
290 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
291 	{
292 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
293 		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
294 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
295 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
296 		.i_port_bitmap = 0x1,
297 	},
298 };
299 
300 static const u8 hclge_hash_key[] = {
301 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
302 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
303 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
304 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
305 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
306 };
307 
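/* Legacy MAC statistics read path: the firmware returns all MAC counters in
 * a fixed chain of HCLGE_MAC_CMD_NUM descriptors, and each 64-bit value is
 * accumulated into hdev->hw_stats.mac_stats in register order.
 */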
308 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
309 {
310 #define HCLGE_MAC_CMD_NUM 21
311 
312 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
313 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
314 	__le64 *desc_data;
315 	int i, k, n;
316 	int ret;
317 
318 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
319 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
320 	if (ret) {
321 		dev_err(&hdev->pdev->dev,
322 			"Get MAC pkt stats fail, status = %d.\n", ret);
323 
324 		return ret;
325 	}
326 
327 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
328 		/* for special opcode 0032, only the first desc has the head */
329 		if (unlikely(i == 0)) {
330 			desc_data = (__le64 *)(&desc[i].data[0]);
331 			n = HCLGE_RD_FIRST_STATS_NUM;
332 		} else {
333 			desc_data = (__le64 *)(&desc[i]);
334 			n = HCLGE_RD_OTHER_STATS_NUM;
335 		}
336 
337 		for (k = 0; k < n; k++) {
338 			*data += le64_to_cpu(*desc_data);
339 			data++;
340 			desc_data++;
341 		}
342 	}
343 
344 	return 0;
345 }
346 
347 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
348 {
349 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
350 	struct hclge_desc *desc;
351 	__le64 *desc_data;
352 	u16 i, k, n;
353 	int ret;
354 
355 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
356 	if (!desc)
357 		return -ENOMEM;
358 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
359 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
360 	if (ret) {
361 		kfree(desc);
362 		return ret;
363 	}
364 
365 	for (i = 0; i < desc_num; i++) {
366 		/* for special opcode 0034, only the first desc has the head */
367 		if (i == 0) {
368 			desc_data = (__le64 *)(&desc[i].data[0]);
369 			n = HCLGE_RD_FIRST_STATS_NUM;
370 		} else {
371 			desc_data = (__le64 *)(&desc[i]);
372 			n = HCLGE_RD_OTHER_STATS_NUM;
373 		}
374 
375 		for (k = 0; k < n; k++) {
376 			*data += le64_to_cpu(*desc_data);
377 			data++;
378 			desc_data++;
379 		}
380 	}
381 
382 	kfree(desc);
383 
384 	return 0;
385 }
386 
387 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
388 {
389 	struct hclge_desc desc;
390 	__le32 *desc_data;
391 	u32 reg_num;
392 	int ret;
393 
394 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
395 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
396 	if (ret)
397 		return ret;
398 
399 	desc_data = (__le32 *)(&desc.data[0]);
400 	reg_num = le32_to_cpu(*desc_data);
401 
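	/* Convert the register count reported by firmware into a descriptor
	 * count: desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4), i.e. one
	 * descriptor plus one more for every four registers beyond the
	 * first three.
	 */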
402 	*desc_num = 1 + ((reg_num - 3) >> 2) +
403 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
404 
405 	return 0;
406 }
407 
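/* Update the MAC statistics: if the firmware can report how many MAC
 * statistics registers it implements, read them with a dynamically sized
 * descriptor chain; on -EOPNOTSUPP fall back to the fixed-size read above.
 */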
408 static int hclge_mac_update_stats(struct hclge_dev *hdev)
409 {
410 	u32 desc_num;
411 	int ret;
412 
413 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
414 
415 	/* The firmware supports the new statistics acquisition method */
416 	if (!ret)
417 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
418 	else if (ret == -EOPNOTSUPP)
419 		ret = hclge_mac_update_stats_defective(hdev);
420 	else
421 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
422 
423 	return ret;
424 }
425 
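/* Query the RX and then the TX packet counter of every TQP one queue at a
 * time and accumulate the value returned in data[1] into the per-ring
 * software statistics.
 */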
426 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
427 {
428 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
429 	struct hclge_vport *vport = hclge_get_vport(handle);
430 	struct hclge_dev *hdev = vport->back;
431 	struct hnae3_queue *queue;
432 	struct hclge_desc desc[1];
433 	struct hclge_tqp *tqp;
434 	int ret, i;
435 
436 	for (i = 0; i < kinfo->num_tqps; i++) {
437 		queue = handle->kinfo.tqp[i];
438 		tqp = container_of(queue, struct hclge_tqp, q);
439 		/* command : HCLGE_OPC_QUERY_RX_STATUS */
440 		hclge_cmd_setup_basic_desc(&desc[0],
441 					   HCLGE_OPC_QUERY_RX_STATUS,
442 					   true);
443 
444 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
445 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
446 		if (ret) {
447 			dev_err(&hdev->pdev->dev,
448 				"Query tqp stat fail, status = %d, queue = %d\n",
449 				ret, i);
450 			return ret;
451 		}
452 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
453 			le32_to_cpu(desc[0].data[1]);
454 	}
455 
456 	for (i = 0; i < kinfo->num_tqps; i++) {
457 		queue = handle->kinfo.tqp[i];
458 		tqp = container_of(queue, struct hclge_tqp, q);
459 		/* command : HCLGE_OPC_QUERY_TX_STATUS */
460 		hclge_cmd_setup_basic_desc(&desc[0],
461 					   HCLGE_OPC_QUERY_TX_STATUS,
462 					   true);
463 
464 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
465 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
466 		if (ret) {
467 			dev_err(&hdev->pdev->dev,
468 				"Query tqp stat fail, status = %d, queue = %d\n",
469 				ret, i);
470 			return ret;
471 		}
472 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
473 			le32_to_cpu(desc[0].data[1]);
474 	}
475 
476 	return 0;
477 }
478 
479 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
480 {
481 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
482 	struct hclge_tqp *tqp;
483 	u64 *buff = data;
484 	int i;
485 
486 	for (i = 0; i < kinfo->num_tqps; i++) {
487 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
488 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
489 	}
490 
491 	for (i = 0; i < kinfo->num_tqps; i++) {
492 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
493 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
494 	}
495 
496 	return buff;
497 }
498 
499 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
500 {
501 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
502 
503 	return kinfo->num_tqps * 2;
504 }
505 
506 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
507 {
508 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
509 	u8 *buff = data;
510 	int i = 0;
511 
512 	for (i = 0; i < kinfo->num_tqps; i++) {
513 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
514 			struct hclge_tqp, q);
515 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
516 			 tqp->index);
517 		buff = buff + ETH_GSTRING_LEN;
518 	}
519 
520 	for (i = 0; i < kinfo->num_tqps; i++) {
521 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
522 			struct hclge_tqp, q);
523 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
524 			 tqp->index);
525 		buff = buff + ETH_GSTRING_LEN;
526 	}
527 
528 	return buff;
529 }
530 
531 static u64 *hclge_comm_get_stats(void *comm_stats,
532 				 const struct hclge_comm_stats_str strs[],
533 				 int size, u64 *data)
534 {
535 	u64 *buf = data;
536 	u32 i;
537 
538 	for (i = 0; i < size; i++)
539 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
540 
541 	return buf + size;
542 }
543 
544 static u8 *hclge_comm_get_strings(u32 stringset,
545 				  const struct hclge_comm_stats_str strs[],
546 				  int size, u8 *data)
547 {
548 	char *buff = (char *)data;
549 	u32 i;
550 
551 	if (stringset != ETH_SS_STATS)
552 		return buff;
553 
554 	for (i = 0; i < size; i++) {
555 		snprintf(buff, ETH_GSTRING_LEN, "%s",
556 			 strs[i].desc);
557 		buff = buff + ETH_GSTRING_LEN;
558 	}
559 
560 	return (u8 *)buff;
561 }
562 
563 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
564 {
565 	struct hnae3_handle *handle;
566 	int status;
567 
568 	handle = &hdev->vport[0].nic;
569 	if (handle->client) {
570 		status = hclge_tqps_update_stats(handle);
571 		if (status) {
572 			dev_err(&hdev->pdev->dev,
573 				"Update TQPS stats fail, status = %d.\n",
574 				status);
575 		}
576 	}
577 
578 	status = hclge_mac_update_stats(hdev);
579 	if (status)
580 		dev_err(&hdev->pdev->dev,
581 			"Update MAC stats fail, status = %d.\n", status);
582 }
583 
584 static void hclge_update_stats(struct hnae3_handle *handle,
585 			       struct net_device_stats *net_stats)
586 {
587 	struct hclge_vport *vport = hclge_get_vport(handle);
588 	struct hclge_dev *hdev = vport->back;
589 	int status;
590 
591 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
592 		return;
593 
594 	status = hclge_mac_update_stats(hdev);
595 	if (status)
596 		dev_err(&hdev->pdev->dev,
597 			"Update MAC stats fail, status = %d.\n",
598 			status);
599 
600 	status = hclge_tqps_update_stats(handle);
601 	if (status)
602 		dev_err(&hdev->pdev->dev,
603 			"Update TQPS stats fail, status = %d.\n",
604 			status);
605 
606 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
607 }
608 
609 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
610 {
611 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
612 		HNAE3_SUPPORT_PHY_LOOPBACK |\
613 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
614 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
615 
616 	struct hclge_vport *vport = hclge_get_vport(handle);
617 	struct hclge_dev *hdev = vport->back;
618 	int count = 0;
619 
620 	/* Loopback test support rules:
621 	 * mac: only supported in GE mode
622 	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
623 	 * phy: only supported when a phy device exists on the board
624 	 */
625 	if (stringset == ETH_SS_TEST) {
626 		/* clear loopback bit flags at first */
627 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
628 		if (hdev->pdev->revision >= 0x21 ||
629 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
630 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
631 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
632 			count += 1;
633 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
634 		}
635 
636 		count += 2;
637 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
638 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
639 	} else if (stringset == ETH_SS_STATS) {
640 		count = ARRAY_SIZE(g_mac_stats_string) +
641 			hclge_tqps_get_sset_count(handle, stringset);
642 	}
643 
644 	return count;
645 }
646 
647 static void hclge_get_strings(struct hnae3_handle *handle,
648 			      u32 stringset,
649 			      u8 *data)
650 {
651 	u8 *p = data;
652 	int size;
653 
654 	if (stringset == ETH_SS_STATS) {
655 		size = ARRAY_SIZE(g_mac_stats_string);
656 		p = hclge_comm_get_strings(stringset,
657 					   g_mac_stats_string,
658 					   size,
659 					   p);
660 		p = hclge_tqps_get_strings(handle, p);
661 	} else if (stringset == ETH_SS_TEST) {
662 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
663 			memcpy(p,
664 			       hns3_nic_test_strs[HNAE3_LOOP_APP],
665 			       ETH_GSTRING_LEN);
666 			p += ETH_GSTRING_LEN;
667 		}
668 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
669 			memcpy(p,
670 			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
671 			       ETH_GSTRING_LEN);
672 			p += ETH_GSTRING_LEN;
673 		}
674 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
675 			memcpy(p,
676 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
677 			       ETH_GSTRING_LEN);
678 			p += ETH_GSTRING_LEN;
679 		}
680 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
681 			memcpy(p,
682 			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
683 			       ETH_GSTRING_LEN);
684 			p += ETH_GSTRING_LEN;
685 		}
686 	}
687 }
688 
689 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
690 {
691 	struct hclge_vport *vport = hclge_get_vport(handle);
692 	struct hclge_dev *hdev = vport->back;
693 	u64 *p;
694 
695 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
696 				 g_mac_stats_string,
697 				 ARRAY_SIZE(g_mac_stats_string),
698 				 data);
699 	p = hclge_tqps_get_stats(handle, p);
700 }
701 
702 static int hclge_parse_func_status(struct hclge_dev *hdev,
703 				   struct hclge_func_status_cmd *status)
704 {
705 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
706 		return -EINVAL;
707 
708 	/* Set the pf to main pf */
709 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
710 		hdev->flag |= HCLGE_FLAG_MAIN;
711 	else
712 		hdev->flag &= ~HCLGE_FLAG_MAIN;
713 
714 	return 0;
715 }
716 
717 static int hclge_query_function_status(struct hclge_dev *hdev)
718 {
719 	struct hclge_func_status_cmd *req;
720 	struct hclge_desc desc;
721 	int timeout = 0;
722 	int ret;
723 
724 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
725 	req = (struct hclge_func_status_cmd *)desc.data;
726 
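	/* Poll the PF state, sleeping 1-2 ms between attempts, until the
	 * firmware reports a non-zero pf_state (PF reset completed) or the
	 * retry limit is reached.
	 */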
727 	do {
728 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
729 		if (ret) {
730 			dev_err(&hdev->pdev->dev,
731 				"query function status failed %d.\n",
732 				ret);
733 
734 			return ret;
735 		}
736 
737 		/* Check pf reset is done */
738 		if (req->pf_state)
739 			break;
740 		usleep_range(1000, 2000);
741 	} while (timeout++ < 5);
742 
743 	ret = hclge_parse_func_status(hdev, req);
744 
745 	return ret;
746 }
747 
748 static int hclge_query_pf_resource(struct hclge_dev *hdev)
749 {
750 	struct hclge_pf_res_cmd *req;
751 	struct hclge_desc desc;
752 	int ret;
753 
754 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
755 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
756 	if (ret) {
757 		dev_err(&hdev->pdev->dev,
758 			"query pf resource failed %d.\n", ret);
759 		return ret;
760 	}
761 
762 	req = (struct hclge_pf_res_cmd *)desc.data;
763 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
764 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
765 
766 	if (req->tx_buf_size)
767 		hdev->tx_buf_size =
768 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
769 	else
770 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
771 
772 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
773 
774 	if (req->dv_buf_size)
775 		hdev->dv_buf_size =
776 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
777 	else
778 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
779 
780 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
781 
782 	if (hnae3_dev_roce_supported(hdev)) {
783 		hdev->roce_base_msix_offset =
784 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
785 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
786 		hdev->num_roce_msi =
787 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
788 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
789 
790 		/* PF should have NIC vectors and RoCE vectors,
791 		 * NIC vectors are queued before RoCE vectors.
792 		 */
793 		hdev->num_msi = hdev->num_roce_msi +
794 				hdev->roce_base_msix_offset;
795 	} else {
796 		hdev->num_msi =
797 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
799 	}
800 
801 	return 0;
802 }
803 
804 static int hclge_parse_speed(int speed_cmd, int *speed)
805 {
806 	switch (speed_cmd) {
807 	case 6:
808 		*speed = HCLGE_MAC_SPEED_10M;
809 		break;
810 	case 7:
811 		*speed = HCLGE_MAC_SPEED_100M;
812 		break;
813 	case 0:
814 		*speed = HCLGE_MAC_SPEED_1G;
815 		break;
816 	case 1:
817 		*speed = HCLGE_MAC_SPEED_10G;
818 		break;
819 	case 2:
820 		*speed = HCLGE_MAC_SPEED_25G;
821 		break;
822 	case 3:
823 		*speed = HCLGE_MAC_SPEED_40G;
824 		break;
825 	case 4:
826 		*speed = HCLGE_MAC_SPEED_50G;
827 		break;
828 	case 5:
829 		*speed = HCLGE_MAC_SPEED_100G;
830 		break;
831 	default:
832 		return -EINVAL;
833 	}
834 
835 	return 0;
836 }
837 
838 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
839 					u8 speed_ability)
840 {
841 	unsigned long *supported = hdev->hw.mac.supported;
842 
843 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
844 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
845 				 supported);
846 
847 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
848 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
849 				 supported);
850 
851 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
852 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
853 				 supported);
854 
855 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
856 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
857 				 supported);
858 
859 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
860 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
861 				 supported);
862 
863 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
864 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
865 }
866 
867 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
868 					 u8 speed_ability)
869 {
870 	unsigned long *supported = hdev->hw.mac.supported;
871 
872 	/* default to supporting all speeds for a GE port */
873 	if (!speed_ability)
874 		speed_ability = HCLGE_SUPPORT_GE;
875 
876 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
877 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
878 				 supported);
879 
880 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
881 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
882 				 supported);
883 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
884 				 supported);
885 	}
886 
887 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
888 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
889 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
890 	}
891 
892 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
893 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
894 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
895 }
896 
897 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
898 {
899 	u8 media_type = hdev->hw.mac.media_type;
900 
901 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
902 		hclge_parse_fiber_link_mode(hdev, speed_ability);
903 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
904 		hclge_parse_copper_link_mode(hdev, speed_ability);
905 }
906 
907 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
908 {
909 	struct hclge_cfg_param_cmd *req;
910 	u64 mac_addr_tmp_high;
911 	u64 mac_addr_tmp;
912 	int i;
913 
914 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
915 
916 	/* get the configuration */
917 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
918 					      HCLGE_CFG_VMDQ_M,
919 					      HCLGE_CFG_VMDQ_S);
920 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
921 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
922 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
923 					    HCLGE_CFG_TQP_DESC_N_M,
924 					    HCLGE_CFG_TQP_DESC_N_S);
925 
926 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
927 					HCLGE_CFG_PHY_ADDR_M,
928 					HCLGE_CFG_PHY_ADDR_S);
929 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
930 					  HCLGE_CFG_MEDIA_TP_M,
931 					  HCLGE_CFG_MEDIA_TP_S);
932 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
933 					  HCLGE_CFG_RX_BUF_LEN_M,
934 					  HCLGE_CFG_RX_BUF_LEN_S);
935 	/* get mac_address */
936 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
937 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
938 					    HCLGE_CFG_MAC_ADDR_H_M,
939 					    HCLGE_CFG_MAC_ADDR_H_S);
940 
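	/* param[2] holds the low 32 bits of the MAC address and param[3]
	 * carries the upper 16 bits; combine them into one 48-bit value
	 * before splitting it into bytes below.
	 */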
941 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
942 
943 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
944 					     HCLGE_CFG_DEFAULT_SPEED_M,
945 					     HCLGE_CFG_DEFAULT_SPEED_S);
946 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
947 					    HCLGE_CFG_RSS_SIZE_M,
948 					    HCLGE_CFG_RSS_SIZE_S);
949 
950 	for (i = 0; i < ETH_ALEN; i++)
951 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
952 
953 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
954 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
955 
956 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
957 					     HCLGE_CFG_SPEED_ABILITY_M,
958 					     HCLGE_CFG_SPEED_ABILITY_S);
959 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
960 					 HCLGE_CFG_UMV_TBL_SPACE_M,
961 					 HCLGE_CFG_UMV_TBL_SPACE_S);
962 	if (!cfg->umv_space)
963 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
964 }
965 
966 /* hclge_get_cfg: query the static parameters from flash
967  * @hdev: pointer to struct hclge_dev
968  * @hcfg: the config structure to be filled
969  */
970 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
971 {
972 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
973 	struct hclge_cfg_param_cmd *req;
974 	int i, ret;
975 
976 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
977 		u32 offset = 0;
978 
979 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
980 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
981 					   true);
982 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
983 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
984 		/* the read length is given to hardware in units of 4 bytes */
985 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
986 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
987 		req->offset = cpu_to_le32(offset);
988 	}
989 
990 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
991 	if (ret) {
992 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
993 		return ret;
994 	}
995 
996 	hclge_parse_cfg(hcfg, desc);
997 
998 	return 0;
999 }
1000 
1001 static int hclge_get_cap(struct hclge_dev *hdev)
1002 {
1003 	int ret;
1004 
1005 	ret = hclge_query_function_status(hdev);
1006 	if (ret) {
1007 		dev_err(&hdev->pdev->dev,
1008 			"query function status error %d.\n", ret);
1009 		return ret;
1010 	}
1011 
1012 	/* get pf resource */
1013 	ret = hclge_query_pf_resource(hdev);
1014 	if (ret)
1015 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1016 
1017 	return ret;
1018 }
1019 
1020 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1021 {
1022 #define HCLGE_MIN_TX_DESC	64
1023 #define HCLGE_MIN_RX_DESC	64
1024 
1025 	if (!is_kdump_kernel())
1026 		return;
1027 
1028 	dev_info(&hdev->pdev->dev,
1029 		 "Running kdump kernel. Using minimal resources\n");
1030 
1031 	/* the minimum number of queue pairs equals the number of vports */
1032 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1033 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1034 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1035 }
1036 
1037 static int hclge_configure(struct hclge_dev *hdev)
1038 {
1039 	struct hclge_cfg cfg;
1040 	int ret, i;
1041 
1042 	ret = hclge_get_cfg(hdev, &cfg);
1043 	if (ret) {
1044 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1045 		return ret;
1046 	}
1047 
1048 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1049 	hdev->base_tqp_pid = 0;
1050 	hdev->rss_size_max = cfg.rss_size_max;
1051 	hdev->rx_buf_len = cfg.rx_buf_len;
1052 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1053 	hdev->hw.mac.media_type = cfg.media_type;
1054 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1055 	hdev->num_tx_desc = cfg.tqp_desc_num;
1056 	hdev->num_rx_desc = cfg.tqp_desc_num;
1057 	hdev->tm_info.num_pg = 1;
1058 	hdev->tc_max = cfg.tc_num;
1059 	hdev->tm_info.hw_pfc_map = 0;
1060 	hdev->wanted_umv_size = cfg.umv_space;
1061 
1062 	if (hnae3_dev_fd_supported(hdev))
1063 		hdev->fd_en = true;
1064 
1065 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1066 	if (ret) {
1067 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1068 		return ret;
1069 	}
1070 
1071 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1072 
1073 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1074 	    (hdev->tc_max < 1)) {
1075 		dev_warn(&hdev->pdev->dev, "TC num = %d is invalid, set to 1.\n",
1076 			 hdev->tc_max);
1077 		hdev->tc_max = 1;
1078 	}
1079 
1080 	/* Dev does not support DCB */
1081 	if (!hnae3_dev_dcb_supported(hdev)) {
1082 		hdev->tc_max = 1;
1083 		hdev->pfc_max = 0;
1084 	} else {
1085 		hdev->pfc_max = hdev->tc_max;
1086 	}
1087 
1088 	hdev->tm_info.num_tc = 1;
1089 
1090 	/* Non-contiguous TCs are currently not supported */
1091 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1092 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1093 
1094 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1095 
1096 	hclge_init_kdump_kernel_config(hdev);
1097 
1098 	return ret;
1099 }
1100 
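/* Program the TSO MSS range into the hardware; the minimum and maximum MSS
 * are written into separate 16-bit fields that share the same
 * HCLGE_TSO_MSS_MIN_M/HCLGE_TSO_MSS_MIN_S bit layout.
 */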
1101 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1102 			    int tso_mss_max)
1103 {
1104 	struct hclge_cfg_tso_status_cmd *req;
1105 	struct hclge_desc desc;
1106 	u16 tso_mss;
1107 
1108 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1109 
1110 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1111 
1112 	tso_mss = 0;
1113 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1114 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1115 	req->tso_mss_min = cpu_to_le16(tso_mss);
1116 
1117 	tso_mss = 0;
1118 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1119 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1120 	req->tso_mss_max = cpu_to_le16(tso_mss);
1121 
1122 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1123 }
1124 
1125 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1126 {
1127 	struct hclge_cfg_gro_status_cmd *req;
1128 	struct hclge_desc desc;
1129 	int ret;
1130 
1131 	if (!hnae3_dev_gro_supported(hdev))
1132 		return 0;
1133 
1134 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1135 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1136 
1137 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1138 
1139 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1140 	if (ret)
1141 		dev_err(&hdev->pdev->dev,
1142 			"GRO hardware config cmd failed, ret = %d\n", ret);
1143 
1144 	return ret;
1145 }
1146 
1147 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1148 {
1149 	struct hclge_tqp *tqp;
1150 	int i;
1151 
1152 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1153 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1154 	if (!hdev->htqp)
1155 		return -ENOMEM;
1156 
1157 	tqp = hdev->htqp;
1158 
1159 	for (i = 0; i < hdev->num_tqps; i++) {
1160 		tqp->dev = &hdev->pdev->dev;
1161 		tqp->index = i;
1162 
1163 		tqp->q.ae_algo = &ae_algo;
1164 		tqp->q.buf_size = hdev->rx_buf_len;
1165 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1166 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1167 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1168 			i * HCLGE_TQP_REG_SIZE;
1169 
1170 		tqp++;
1171 	}
1172 
1173 	return 0;
1174 }
1175 
1176 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1177 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1178 {
1179 	struct hclge_tqp_map_cmd *req;
1180 	struct hclge_desc desc;
1181 	int ret;
1182 
1183 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1184 
1185 	req = (struct hclge_tqp_map_cmd *)desc.data;
1186 	req->tqp_id = cpu_to_le16(tqp_pid);
1187 	req->tqp_vf = func_id;
1188 	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1189 			1 << HCLGE_TQP_MAP_EN_B;
1190 	req->tqp_vid = cpu_to_le16(tqp_vid);
1191 
1192 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1193 	if (ret)
1194 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1195 
1196 	return ret;
1197 }
1198 
1199 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1200 {
1201 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1202 	struct hclge_dev *hdev = vport->back;
1203 	int i, alloced;
1204 
1205 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1206 	     alloced < num_tqps; i++) {
1207 		if (!hdev->htqp[i].alloced) {
1208 			hdev->htqp[i].q.handle = &vport->nic;
1209 			hdev->htqp[i].q.tqp_index = alloced;
1210 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1211 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1212 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1213 			hdev->htqp[i].alloced = true;
1214 			alloced++;
1215 		}
1216 	}
1217 	vport->alloc_tqps = alloced;
1218 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1219 				vport->alloc_tqps / hdev->tm_info.num_tc);
1220 
1221 	return 0;
1222 }
1223 
1224 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1225 			    u16 num_tx_desc, u16 num_rx_desc)
1227 {
1228 	struct hnae3_handle *nic = &vport->nic;
1229 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1230 	struct hclge_dev *hdev = vport->back;
1231 	int ret;
1232 
1233 	kinfo->num_tx_desc = num_tx_desc;
1234 	kinfo->num_rx_desc = num_rx_desc;
1235 
1236 	kinfo->rx_buf_len = hdev->rx_buf_len;
1237 
1238 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1239 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1240 	if (!kinfo->tqp)
1241 		return -ENOMEM;
1242 
1243 	ret = hclge_assign_tqp(vport, num_tqps);
1244 	if (ret)
1245 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1246 
1247 	return ret;
1248 }
1249 
1250 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1251 				  struct hclge_vport *vport)
1252 {
1253 	struct hnae3_handle *nic = &vport->nic;
1254 	struct hnae3_knic_private_info *kinfo;
1255 	u16 i;
1256 
1257 	kinfo = &nic->kinfo;
1258 	for (i = 0; i < vport->alloc_tqps; i++) {
1259 		struct hclge_tqp *q =
1260 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1261 		bool is_pf;
1262 		int ret;
1263 
1264 		is_pf = !(vport->vport_id);
1265 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1266 					     i, is_pf);
1267 		if (ret)
1268 			return ret;
1269 	}
1270 
1271 	return 0;
1272 }
1273 
1274 static int hclge_map_tqp(struct hclge_dev *hdev)
1275 {
1276 	struct hclge_vport *vport = hdev->vport;
1277 	u16 i, num_vport;
1278 
1279 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1280 	for (i = 0; i < num_vport; i++)	{
1281 		int ret;
1282 
1283 		ret = hclge_map_tqp_to_vport(hdev, vport);
1284 		if (ret)
1285 			return ret;
1286 
1287 		vport++;
1288 	}
1289 
1290 	return 0;
1291 }
1292 
1293 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1294 {
1295 	/* this would be initialized later */
1296 }
1297 
1298 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1299 {
1300 	struct hnae3_handle *nic = &vport->nic;
1301 	struct hclge_dev *hdev = vport->back;
1302 	int ret;
1303 
1304 	nic->pdev = hdev->pdev;
1305 	nic->ae_algo = &ae_algo;
1306 	nic->numa_node_mask = hdev->numa_node_mask;
1307 
1308 	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1309 		ret = hclge_knic_setup(vport, num_tqps,
1310 				       hdev->num_tx_desc, hdev->num_rx_desc);
1311 
1312 		if (ret) {
1313 			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1314 				ret);
1315 			return ret;
1316 		}
1317 	} else {
1318 		hclge_unic_setup(vport, num_tqps);
1319 	}
1320 
1321 	return 0;
1322 }
1323 
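/* Allocate one vport for the PF's own NIC plus one per VMDq vport and per
 * requested VF; the TQPs are split evenly between the vports, with any
 * remainder going to the main vport (vport 0).
 */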
1324 static int hclge_alloc_vport(struct hclge_dev *hdev)
1325 {
1326 	struct pci_dev *pdev = hdev->pdev;
1327 	struct hclge_vport *vport;
1328 	u32 tqp_main_vport;
1329 	u32 tqp_per_vport;
1330 	int num_vport, i;
1331 	int ret;
1332 
1333 	/* We need to alloc a vport for the main NIC of the PF */
1334 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1335 
1336 	if (hdev->num_tqps < num_vport) {
1337 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1338 			hdev->num_tqps, num_vport);
1339 		return -EINVAL;
1340 	}
1341 
1342 	/* Alloc the same number of TQPs for every vport */
1343 	tqp_per_vport = hdev->num_tqps / num_vport;
1344 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1345 
1346 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1347 			     GFP_KERNEL);
1348 	if (!vport)
1349 		return -ENOMEM;
1350 
1351 	hdev->vport = vport;
1352 	hdev->num_alloc_vport = num_vport;
1353 
1354 	if (IS_ENABLED(CONFIG_PCI_IOV))
1355 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1356 
1357 	for (i = 0; i < num_vport; i++) {
1358 		vport->back = hdev;
1359 		vport->vport_id = i;
1360 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1361 		INIT_LIST_HEAD(&vport->vlan_list);
1362 		INIT_LIST_HEAD(&vport->uc_mac_list);
1363 		INIT_LIST_HEAD(&vport->mc_mac_list);
1364 
1365 		if (i == 0)
1366 			ret = hclge_vport_setup(vport, tqp_main_vport);
1367 		else
1368 			ret = hclge_vport_setup(vport, tqp_per_vport);
1369 		if (ret) {
1370 			dev_err(&pdev->dev,
1371 				"vport setup failed for vport %d, %d\n",
1372 				i, ret);
1373 			return ret;
1374 		}
1375 
1376 		vport++;
1377 	}
1378 
1379 	return 0;
1380 }
1381 
1382 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1383 				    struct hclge_pkt_buf_alloc *buf_alloc)
1384 {
1385 /* TX buffer size is in units of 128 bytes */
1386 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1387 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1388 	struct hclge_tx_buff_alloc_cmd *req;
1389 	struct hclge_desc desc;
1390 	int ret;
1391 	u8 i;
1392 
1393 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1394 
1395 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1396 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1397 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1398 
1399 		req->tx_pkt_buff[i] =
1400 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1401 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1402 	}
1403 
1404 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1405 	if (ret)
1406 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1407 			ret);
1408 
1409 	return ret;
1410 }
1411 
1412 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1413 				 struct hclge_pkt_buf_alloc *buf_alloc)
1414 {
1415 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1416 
1417 	if (ret)
1418 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1419 
1420 	return ret;
1421 }
1422 
1423 static int hclge_get_tc_num(struct hclge_dev *hdev)
1424 {
1425 	int i, cnt = 0;
1426 
1427 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1428 		if (hdev->hw_tc_map & BIT(i))
1429 			cnt++;
1430 	return cnt;
1431 }
1432 
1433 static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
1434 {
1435 	int i, cnt = 0;
1436 
1437 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1438 		if (hdev->hw_tc_map & BIT(i) &&
1439 		    hdev->tm_info.hw_pfc_map & BIT(i))
1440 			cnt++;
1441 	return cnt;
1442 }
1443 
1444 /* Get the number of PFC enabled TCs that have a private buffer */
1445 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1446 				  struct hclge_pkt_buf_alloc *buf_alloc)
1447 {
1448 	struct hclge_priv_buf *priv;
1449 	int i, cnt = 0;
1450 
1451 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1452 		priv = &buf_alloc->priv_buf[i];
1453 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1454 		    priv->enable)
1455 			cnt++;
1456 	}
1457 
1458 	return cnt;
1459 }
1460 
1461 /* Get the number of PFC disabled TCs that have a private buffer */
1462 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1463 				     struct hclge_pkt_buf_alloc *buf_alloc)
1464 {
1465 	struct hclge_priv_buf *priv;
1466 	int i, cnt = 0;
1467 
1468 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1469 		priv = &buf_alloc->priv_buf[i];
1470 		if (hdev->hw_tc_map & BIT(i) &&
1471 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1472 		    priv->enable)
1473 			cnt++;
1474 	}
1475 
1476 	return cnt;
1477 }
1478 
1479 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1480 {
1481 	struct hclge_priv_buf *priv;
1482 	u32 rx_priv = 0;
1483 	int i;
1484 
1485 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1486 		priv = &buf_alloc->priv_buf[i];
1487 		if (priv->enable)
1488 			rx_priv += priv->buf_size;
1489 	}
1490 	return rx_priv;
1491 }
1492 
1493 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1494 {
1495 	u32 i, total_tx_size = 0;
1496 
1497 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1498 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1499 
1500 	return total_tx_size;
1501 }
1502 
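/* Check whether rx_all (the packet buffer left after TX allocation) can hold
 * the private buffers of all enabled TCs plus a large enough shared buffer.
 * If it can, size the shared buffer and set its overall and per-TC
 * flow-control thresholds.
 */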
1503 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1504 				struct hclge_pkt_buf_alloc *buf_alloc,
1505 				u32 rx_all)
1506 {
1507 	u32 shared_buf_min, shared_buf_tc, shared_std;
1508 	int tc_num, pfc_enable_num;
1509 	u32 shared_buf, aligned_mps;
1510 	u32 rx_priv;
1511 	int i;
1512 
1513 	tc_num = hclge_get_tc_num(hdev);
1514 	pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1515 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1516 
1517 	if (hnae3_dev_dcb_supported(hdev))
1518 		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1519 	else
1520 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1521 					+ hdev->dv_buf_size;
1522 
1523 	shared_buf_tc = pfc_enable_num * aligned_mps +
1524 			(tc_num - pfc_enable_num) * aligned_mps / 2 +
1525 			aligned_mps;
1526 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1527 			     HCLGE_BUF_SIZE_UNIT);
1528 
1529 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1530 	if (rx_all < rx_priv + shared_std)
1531 		return false;
1532 
1533 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1534 	buf_alloc->s_buf.buf_size = shared_buf;
1535 	if (hnae3_dev_dcb_supported(hdev)) {
1536 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1537 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1538 			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1539 	} else {
1540 		buf_alloc->s_buf.self.high = aligned_mps +
1541 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1542 		buf_alloc->s_buf.self.low =
1543 			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1544 	}
1545 
1546 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1547 		if ((hdev->hw_tc_map & BIT(i)) &&
1548 		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
1549 			buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
1550 			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
1551 		} else {
1552 			buf_alloc->s_buf.tc_thrd[i].low = 0;
1553 			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
1554 		}
1555 	}
1556 
1557 	return true;
1558 }
1559 
1560 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1561 				struct hclge_pkt_buf_alloc *buf_alloc)
1562 {
1563 	u32 i, total_size;
1564 
1565 	total_size = hdev->pkt_buf_size;
1566 
1567 	/* alloc tx buffer for all enabled tc */
1568 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1569 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1570 
1571 		if (hdev->hw_tc_map & BIT(i)) {
1572 			if (total_size < hdev->tx_buf_size)
1573 				return -ENOMEM;
1574 
1575 			priv->tx_buf_size = hdev->tx_buf_size;
1576 		} else {
1577 			priv->tx_buf_size = 0;
1578 		}
1579 
1580 		total_size -= priv->tx_buf_size;
1581 	}
1582 
1583 	return 0;
1584 }
1585 
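/* Assign a private RX buffer to every enabled TC: use generous watermarks
 * when 'max' is true and minimal ones otherwise, with PFC enabled TCs
 * getting a non-zero low watermark. Each TC's buffer is its high watermark
 * plus the DV buffer; return whether the remaining space still fits the
 * shared buffer.
 */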
1586 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1587 				  struct hclge_pkt_buf_alloc *buf_alloc)
1588 {
1589 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1590 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1591 	int i;
1592 
1593 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1594 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1595 
1596 		priv->enable = 0;
1597 		priv->wl.low = 0;
1598 		priv->wl.high = 0;
1599 		priv->buf_size = 0;
1600 
1601 		if (!(hdev->hw_tc_map & BIT(i)))
1602 			continue;
1603 
1604 		priv->enable = 1;
1605 
1606 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1607 			priv->wl.low = max ? aligned_mps : 256;
1608 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1609 						HCLGE_BUF_SIZE_UNIT);
1610 		} else {
1611 			priv->wl.low = 0;
1612 			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1613 		}
1614 
1615 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1616 	}
1617 
1618 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1619 }
1620 
1621 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1622 					  struct hclge_pkt_buf_alloc *buf_alloc)
1623 {
1624 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1625 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1626 	int i;
1627 
1628 	/* clear the private buffers starting from the last TC */
1629 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1630 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1631 
1632 		if (hdev->hw_tc_map & BIT(i) &&
1633 		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1634 			/* Clear the no pfc TC private buffer */
1635 			priv->wl.low = 0;
1636 			priv->wl.high = 0;
1637 			priv->buf_size = 0;
1638 			priv->enable = 0;
1639 			no_pfc_priv_num--;
1640 		}
1641 
1642 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1643 		    no_pfc_priv_num == 0)
1644 			break;
1645 	}
1646 
1647 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1648 }
1649 
1650 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1651 					struct hclge_pkt_buf_alloc *buf_alloc)
1652 {
1653 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1654 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1655 	int i;
1656 
1657 	/* clear the private buffers starting from the last TC */
1658 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1659 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1660 
1661 		if (hdev->hw_tc_map & BIT(i) &&
1662 		    hdev->tm_info.hw_pfc_map & BIT(i)) {
1663 			/* Reduce the number of pfc TC with private buffer */
1664 			priv->wl.low = 0;
1665 			priv->enable = 0;
1666 			priv->wl.high = 0;
1667 			priv->buf_size = 0;
1668 			pfc_priv_num--;
1669 		}
1670 
1671 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1672 		    pfc_priv_num == 0)
1673 			break;
1674 	}
1675 
1676 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1677 }
1678 
1679 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1680  * @hdev: pointer to struct hclge_dev
1681  * @buf_alloc: pointer to buffer calculation data
1682  * @return: 0: calculation successful, negative: failure
1683  */
1684 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1685 				struct hclge_pkt_buf_alloc *buf_alloc)
1686 {
1687 	/* When DCB is not supported, rx private buffer is not allocated. */
1688 	if (!hnae3_dev_dcb_supported(hdev)) {
1689 		u32 rx_all = hdev->pkt_buf_size;
1690 
1691 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1692 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1693 			return -ENOMEM;
1694 
1695 		return 0;
1696 	}
1697 
1698 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1699 		return 0;
1700 
1701 	/* try to decrease the buffer size */
1702 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1703 		return 0;
1704 
1705 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1706 		return 0;
1707 
1708 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1709 		return 0;
1710 
1711 	return -ENOMEM;
1712 }
1713 
1714 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1715 				   struct hclge_pkt_buf_alloc *buf_alloc)
1716 {
1717 	struct hclge_rx_priv_buff_cmd *req;
1718 	struct hclge_desc desc;
1719 	int ret;
1720 	int i;
1721 
1722 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1723 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1724 
1725 	/* Alloc private buffer TCs */
1726 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1727 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1728 
1729 		req->buf_num[i] =
1730 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1731 		req->buf_num[i] |=
1732 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1733 	}
1734 
1735 	req->shared_buf =
1736 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1737 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
1738 
1739 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1740 	if (ret)
1741 		dev_err(&hdev->pdev->dev,
1742 			"rx private buffer alloc cmd failed %d\n", ret);
1743 
1744 	return ret;
1745 }
1746 
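/* Program the per-TC RX private buffer waterlines. The TCs are spread over
 * two chained descriptors (HCLGE_TC_NUM_ONE_DESC TCs each), with the first
 * descriptor carrying the NEXT flag.
 */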
1747 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1748 				   struct hclge_pkt_buf_alloc *buf_alloc)
1749 {
1750 	struct hclge_rx_priv_wl_buf *req;
1751 	struct hclge_priv_buf *priv;
1752 	struct hclge_desc desc[2];
1753 	int i, j;
1754 	int ret;
1755 
1756 	for (i = 0; i < 2; i++) {
1757 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1758 					   false);
1759 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1760 
1761 		/* The first descriptor set the NEXT bit to 1 */
1762 		if (i == 0)
1763 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1764 		else
1765 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1766 
1767 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1768 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1769 
1770 			priv = &buf_alloc->priv_buf[idx];
1771 			req->tc_wl[j].high =
1772 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1773 			req->tc_wl[j].high |=
1774 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1775 			req->tc_wl[j].low =
1776 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1777 			req->tc_wl[j].low |=
1778 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1779 		}
1780 	}
1781 
1782 	/* Send 2 descriptors at one time */
1783 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1784 	if (ret)
1785 		dev_err(&hdev->pdev->dev,
1786 			"rx private waterline config cmd failed %d\n",
1787 			ret);
1788 	return ret;
1789 }
1790 
1791 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1792 				    struct hclge_pkt_buf_alloc *buf_alloc)
1793 {
1794 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1795 	struct hclge_rx_com_thrd *req;
1796 	struct hclge_desc desc[2];
1797 	struct hclge_tc_thrd *tc;
1798 	int i, j;
1799 	int ret;
1800 
1801 	for (i = 0; i < 2; i++) {
1802 		hclge_cmd_setup_basic_desc(&desc[i],
1803 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1804 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
1805 
1806 		/* The first descriptor sets the NEXT bit to 1 */
1807 		if (i == 0)
1808 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1809 		else
1810 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1811 
1812 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1813 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1814 
1815 			req->com_thrd[j].high =
1816 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1817 			req->com_thrd[j].high |=
1818 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1819 			req->com_thrd[j].low =
1820 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1821 			req->com_thrd[j].low |=
1822 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1823 		}
1824 	}
1825 
1826 	/* Send 2 descriptors at one time */
1827 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1828 	if (ret)
1829 		dev_err(&hdev->pdev->dev,
1830 			"common threshold config cmd failed %d\n", ret);
1831 	return ret;
1832 }
1833 
1834 static int hclge_common_wl_config(struct hclge_dev *hdev,
1835 				  struct hclge_pkt_buf_alloc *buf_alloc)
1836 {
1837 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1838 	struct hclge_rx_com_wl *req;
1839 	struct hclge_desc desc;
1840 	int ret;
1841 
1842 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1843 
1844 	req = (struct hclge_rx_com_wl *)desc.data;
1845 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1846 	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1847 
1848 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1849 	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1850 
1851 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1852 	if (ret)
1853 		dev_err(&hdev->pdev->dev,
1854 			"common waterline config cmd failed %d\n", ret);
1855 
1856 	return ret;
1857 }
1858 
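/* hclge_buffer_alloc: calculate and configure the tx/rx packet buffers
 * @hdev: pointer to struct hclge_dev
 * @return: 0: configure successful, negative: fail
 * Calculates and allocates the tx buffers and the rx private buffers; when
 * DCB is supported, it also configures the rx private waterlines and the
 * shared buffer thresholds before setting the common waterline.
 */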
1859 int hclge_buffer_alloc(struct hclge_dev *hdev)
1860 {
1861 	struct hclge_pkt_buf_alloc *pkt_buf;
1862 	int ret;
1863 
1864 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1865 	if (!pkt_buf)
1866 		return -ENOMEM;
1867 
1868 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1869 	if (ret) {
1870 		dev_err(&hdev->pdev->dev,
1871 			"could not calc tx buffer size for all TCs %d\n", ret);
1872 		goto out;
1873 	}
1874 
1875 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1876 	if (ret) {
1877 		dev_err(&hdev->pdev->dev,
1878 			"could not alloc tx buffers %d\n", ret);
1879 		goto out;
1880 	}
1881 
1882 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1883 	if (ret) {
1884 		dev_err(&hdev->pdev->dev,
1885 			"could not calc rx priv buffer size for all TCs %d\n",
1886 			ret);
1887 		goto out;
1888 	}
1889 
1890 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1891 	if (ret) {
1892 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1893 			ret);
1894 		goto out;
1895 	}
1896 
1897 	if (hnae3_dev_dcb_supported(hdev)) {
1898 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1899 		if (ret) {
1900 			dev_err(&hdev->pdev->dev,
1901 				"could not configure rx private waterline %d\n",
1902 				ret);
1903 			goto out;
1904 		}
1905 
1906 		ret = hclge_common_thrd_config(hdev, pkt_buf);
1907 		if (ret) {
1908 			dev_err(&hdev->pdev->dev,
1909 				"could not configure common threshold %d\n",
1910 				ret);
1911 			goto out;
1912 		}
1913 	}
1914 
1915 	ret = hclge_common_wl_config(hdev, pkt_buf);
1916 	if (ret)
1917 		dev_err(&hdev->pdev->dev,
1918 			"could not configure common waterline %d\n", ret);
1919 
1920 out:
1921 	kfree(pkt_buf);
1922 	return ret;
1923 }
1924 
1925 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1926 {
1927 	struct hnae3_handle *roce = &vport->roce;
1928 	struct hnae3_handle *nic = &vport->nic;
1929 
1930 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
1931 
1932 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1933 	    vport->back->num_msi_left == 0)
1934 		return -EINVAL;
1935 
1936 	roce->rinfo.base_vector = vport->back->roce_base_vector;
1937 
1938 	roce->rinfo.netdev = nic->kinfo.netdev;
1939 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
1940 
1941 	roce->pdev = nic->pdev;
1942 	roce->ae_algo = nic->ae_algo;
1943 	roce->numa_node_mask = nic->numa_node_mask;
1944 
1945 	return 0;
1946 }
1947 
1948 static int hclge_init_msi(struct hclge_dev *hdev)
1949 {
1950 	struct pci_dev *pdev = hdev->pdev;
1951 	int vectors;
1952 	int i;
1953 
1954 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1955 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
1956 	if (vectors < 0) {
1957 		dev_err(&pdev->dev,
1958 			"failed(%d) to allocate MSI/MSI-X vectors\n",
1959 			vectors);
1960 		return vectors;
1961 	}
1962 	if (vectors < hdev->num_msi)
1963 		dev_warn(&hdev->pdev->dev,
1964 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1965 			 hdev->num_msi, vectors);
1966 
1967 	hdev->num_msi = vectors;
1968 	hdev->num_msi_left = vectors;
1969 	hdev->base_msi_vector = pdev->irq;
1970 	hdev->roce_base_vector = hdev->base_msi_vector +
1971 				hdev->roce_base_msix_offset;
1972 
1973 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1974 					   sizeof(u16), GFP_KERNEL);
1975 	if (!hdev->vector_status) {
1976 		pci_free_irq_vectors(pdev);
1977 		return -ENOMEM;
1978 	}
1979 
1980 	for (i = 0; i < hdev->num_msi; i++)
1981 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1982 
1983 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1984 					sizeof(int), GFP_KERNEL);
1985 	if (!hdev->vector_irq) {
1986 		pci_free_irq_vectors(pdev);
1987 		return -ENOMEM;
1988 	}
1989 
1990 	return 0;
1991 }
1992 
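/* Half duplex is only kept for 10M and 100M; any other speed is forced to
 * full duplex.
 */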
1993 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1994 {
1996 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1997 		duplex = HCLGE_MAC_FULL;
1998 
1999 	return duplex;
2000 }
2001 
2002 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2003 				      u8 duplex)
2004 {
2005 	struct hclge_config_mac_speed_dup_cmd *req;
2006 	struct hclge_desc desc;
2007 	int ret;
2008 
2009 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2010 
2011 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2012 
2013 	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2014 
2015 	switch (speed) {
2016 	case HCLGE_MAC_SPEED_10M:
2017 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2018 				HCLGE_CFG_SPEED_S, 6);
2019 		break;
2020 	case HCLGE_MAC_SPEED_100M:
2021 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2022 				HCLGE_CFG_SPEED_S, 7);
2023 		break;
2024 	case HCLGE_MAC_SPEED_1G:
2025 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2026 				HCLGE_CFG_SPEED_S, 0);
2027 		break;
2028 	case HCLGE_MAC_SPEED_10G:
2029 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2030 				HCLGE_CFG_SPEED_S, 1);
2031 		break;
2032 	case HCLGE_MAC_SPEED_25G:
2033 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2034 				HCLGE_CFG_SPEED_S, 2);
2035 		break;
2036 	case HCLGE_MAC_SPEED_40G:
2037 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2038 				HCLGE_CFG_SPEED_S, 3);
2039 		break;
2040 	case HCLGE_MAC_SPEED_50G:
2041 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2042 				HCLGE_CFG_SPEED_S, 4);
2043 		break;
2044 	case HCLGE_MAC_SPEED_100G:
2045 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2046 				HCLGE_CFG_SPEED_S, 5);
2047 		break;
2048 	default:
2049 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2050 		return -EINVAL;
2051 	}
2052 
2053 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2054 		      1);
2055 
2056 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2057 	if (ret) {
2058 		dev_err(&hdev->pdev->dev,
2059 			"mac speed/duplex config cmd failed %d.\n", ret);
2060 		return ret;
2061 	}
2062 
2063 	return 0;
2064 }
2065 
2066 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2067 {
2068 	int ret;
2069 
2070 	duplex = hclge_check_speed_dup(duplex, speed);
2071 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2072 		return 0;
2073 
2074 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2075 	if (ret)
2076 		return ret;
2077 
2078 	hdev->hw.mac.speed = speed;
2079 	hdev->hw.mac.duplex = duplex;
2080 
2081 	return 0;
2082 }
2083 
2084 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2085 				     u8 duplex)
2086 {
2087 	struct hclge_vport *vport = hclge_get_vport(handle);
2088 	struct hclge_dev *hdev = vport->back;
2089 
2090 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2091 }
2092 
2093 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2094 {
2095 	struct hclge_config_auto_neg_cmd *req;
2096 	struct hclge_desc desc;
2097 	u32 flag = 0;
2098 	int ret;
2099 
2100 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2101 
2102 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2103 	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2104 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2105 
2106 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2107 	if (ret)
2108 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2109 			ret);
2110 
2111 	return ret;
2112 }
2113 
2114 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2115 {
2116 	struct hclge_vport *vport = hclge_get_vport(handle);
2117 	struct hclge_dev *hdev = vport->back;
2118 
2119 	return hclge_set_autoneg_en(hdev, enable);
2120 }
2121 
2122 static int hclge_get_autoneg(struct hnae3_handle *handle)
2123 {
2124 	struct hclge_vport *vport = hclge_get_vport(handle);
2125 	struct hclge_dev *hdev = vport->back;
2126 	struct phy_device *phydev = hdev->hw.mac.phydev;
2127 
2128 	if (phydev)
2129 		return phydev->autoneg;
2130 
2131 	return hdev->hw.mac.autoneg;
2132 }
2133 
2134 static int hclge_mac_init(struct hclge_dev *hdev)
2135 {
2136 	struct hclge_mac *mac = &hdev->hw.mac;
2137 	int ret;
2138 
2139 	hdev->support_sfp_query = true;
2140 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2141 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2142 					 hdev->hw.mac.duplex);
2143 	if (ret) {
2144 		dev_err(&hdev->pdev->dev,
2145 			"Config mac speed dup fail ret=%d\n", ret);
2146 		return ret;
2147 	}
2148 
2149 	mac->link = 0;
2150 
2151 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2152 	if (ret) {
2153 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2154 		return ret;
2155 	}
2156 
2157 	ret = hclge_buffer_alloc(hdev);
2158 	if (ret)
2159 		dev_err(&hdev->pdev->dev,
2160 			"allocate buffer fail, ret=%d\n", ret);
2161 
2162 	return ret;
2163 }
2164 
2165 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2166 {
2167 	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2168 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2169 		schedule_work(&hdev->mbx_service_task);
2170 }
2171 
2172 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2173 {
2174 	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2175 		schedule_work(&hdev->rst_service_task);
2176 }
2177 
2178 static void hclge_task_schedule(struct hclge_dev *hdev)
2179 {
2180 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2181 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2182 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2183 		(void)schedule_work(&hdev->service_task);
2184 }
2185 
2186 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2187 {
2188 	struct hclge_link_status_cmd *req;
2189 	struct hclge_desc desc;
2190 	int link_status;
2191 	int ret;
2192 
2193 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2194 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2195 	if (ret) {
2196 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2197 			ret);
2198 		return ret;
2199 	}
2200 
2201 	req = (struct hclge_link_status_cmd *)desc.data;
2202 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2203 
2204 	return !!link_status;
2205 }
2206 
2207 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2208 {
2209 	int mac_state;
2210 	int link_stat;
2211 
2212 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2213 		return 0;
2214 
2215 	mac_state = hclge_get_mac_link_status(hdev);
2216 
2217 	if (hdev->hw.mac.phydev) {
2218 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2219 			link_stat = mac_state &
2220 				hdev->hw.mac.phydev->link;
2221 		else
2222 			link_stat = 0;
2223 
2224 	} else {
2225 		link_stat = mac_state;
2226 	}
2227 
2228 	return !!link_stat;
2229 }
2230 
2231 static void hclge_update_link_status(struct hclge_dev *hdev)
2232 {
2233 	struct hnae3_client *rclient = hdev->roce_client;
2234 	struct hnae3_client *client = hdev->nic_client;
2235 	struct hnae3_handle *rhandle;
2236 	struct hnae3_handle *handle;
2237 	int state;
2238 	int i;
2239 
2240 	if (!client)
2241 		return;
2242 	state = hclge_get_mac_phy_link(hdev);
2243 	if (state != hdev->hw.mac.link) {
2244 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2245 			handle = &hdev->vport[i].nic;
2246 			client->ops->link_status_change(handle, state);
2247 			rhandle = &hdev->vport[i].roce;
2248 			if (rclient && rclient->ops->link_status_change)
2249 				rclient->ops->link_status_change(rhandle,
2250 								 state);
2251 		}
2252 		hdev->hw.mac.link = state;
2253 	}
2254 }
2255 
2256 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2257 {
2258 	struct hclge_sfp_speed_cmd *resp = NULL;
2259 	struct hclge_desc desc;
2260 	int ret;
2261 
2262 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2263 	resp = (struct hclge_sfp_speed_cmd *)desc.data;
2264 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2265 	if (ret == -EOPNOTSUPP) {
2266 		dev_warn(&hdev->pdev->dev,
2267 			 "IMP does not support getting SFP speed %d\n", ret);
2268 		return ret;
2269 	} else if (ret) {
2270 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2271 		return ret;
2272 	}
2273 
2274 	*speed = resp->sfp_speed;
2275 
2276 	return 0;
2277 }
2278 
2279 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2280 {
2281 	struct hclge_mac mac = hdev->hw.mac;
2282 	int speed;
2283 	int ret;
2284 
2285 	/* get the speed from the SFP cmd when the phy
2286 	 * doesn't exist.
2287 	 */
2288 	if (mac.phydev)
2289 		return 0;
2290 
2291 	/* if IMP does not support getting SFP/qSFP speed, return directly */
2292 	if (!hdev->support_sfp_query)
2293 		return 0;
2294 
2295 	ret = hclge_get_sfp_speed(hdev, &speed);
2296 	if (ret == -EOPNOTSUPP) {
2297 		hdev->support_sfp_query = false;
2298 		return ret;
2299 	} else if (ret) {
2300 		return ret;
2301 	}
2302 
2303 	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2304 		return 0; /* do nothing if no SFP */
2305 
2306 	/* must config full duplex for SFP */
2307 	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2308 }
2309 
2310 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2311 {
2312 	struct hclge_vport *vport = hclge_get_vport(handle);
2313 	struct hclge_dev *hdev = vport->back;
2314 
2315 	return hclge_update_speed_duplex(hdev);
2316 }
2317 
2318 static int hclge_get_status(struct hnae3_handle *handle)
2319 {
2320 	struct hclge_vport *vport = hclge_get_vport(handle);
2321 	struct hclge_dev *hdev = vport->back;
2322 
2323 	hclge_update_link_status(hdev);
2324 
2325 	return hdev->hw.mac.link;
2326 }
2327 
2328 static void hclge_service_timer(struct timer_list *t)
2329 {
2330 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2331 
2332 	mod_timer(&hdev->service_timer, jiffies + HZ);
2333 	hdev->hw_stats.stats_timer++;
2334 	hclge_task_schedule(hdev);
2335 }
2336 
2337 static void hclge_service_complete(struct hclge_dev *hdev)
2338 {
2339 	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2340 
2341 	/* Flush memory before next watchdog */
2342 	smp_mb__before_atomic();
2343 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2344 }
2345 
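/* Check the vector0 event sources in priority order: reset events first,
 * then MSI-X (hardware error) events, then mailbox (CMDQ RX) events. For
 * reset and mailbox events, the value needed to clear the source register
 * is returned through @clearval.
 */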
2346 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2347 {
2348 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2349 
2350 	/* fetch the events from their corresponding regs */
2351 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2352 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2353 	msix_src_reg = hclge_read_dev(&hdev->hw,
2354 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2355 
2356 	/* Assumption: if reset and mailbox events are reported together,
2357 	 * only the reset event is processed in this pass and the mailbox
2358 	 * events are deferred. Since the RX CMDQ event has not been cleared
2359 	 * this time, the hardware will raise another interrupt just for the
2360 	 * mailbox.
2361 	 */
2362 
2363 	/* check for vector0 reset event sources */
2364 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2365 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2366 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2367 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2368 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2369 		return HCLGE_VECTOR0_EVENT_RST;
2370 	}
2371 
2372 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2373 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2374 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2375 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2376 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2377 		return HCLGE_VECTOR0_EVENT_RST;
2378 	}
2379 
2380 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2381 		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2382 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2383 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2384 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2385 		return HCLGE_VECTOR0_EVENT_RST;
2386 	}
2387 
2388 	/* check for vector0 msix event source */
2389 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2390 		return HCLGE_VECTOR0_EVENT_ERR;
2391 
2392 	/* check for vector0 mailbox(=CMDQ RX) event source */
2393 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2394 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2395 		*clearval = cmdq_src_reg;
2396 		return HCLGE_VECTOR0_EVENT_MBX;
2397 	}
2398 
2399 	return HCLGE_VECTOR0_EVENT_OTHER;
2400 }
2401 
2402 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2403 				    u32 regclr)
2404 {
2405 	switch (event_type) {
2406 	case HCLGE_VECTOR0_EVENT_RST:
2407 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2408 		break;
2409 	case HCLGE_VECTOR0_EVENT_MBX:
2410 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2411 		break;
2412 	default:
2413 		break;
2414 	}
2415 }
2416 
2417 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2418 {
2419 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2420 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2421 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2422 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2423 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2424 }
2425 
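/* enable or disable the vector0 interrupt by writing 1 or 0 to the misc
 * vector register
 */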
2426 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2427 {
2428 	writel(enable ? 1 : 0, vector->addr);
2429 }
2430 
2431 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2432 {
2433 	struct hclge_dev *hdev = data;
2434 	u32 event_cause;
2435 	u32 clearval;
2436 
2437 	hclge_enable_vector(&hdev->misc_vector, false);
2438 	event_cause = hclge_check_event_cause(hdev, &clearval);
2439 
2440 	/* vector 0 interrupt is shared with reset and mailbox source events. */
2441 	switch (event_cause) {
2442 	case HCLGE_VECTOR0_EVENT_ERR:
2443 		/* We do not know what type of reset is required now. This can
2444 		 * only be decided after we fetch the type of errors which
2445 		 * caused this event. Therefore, do the following for now:
2446 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset, i.e. defer the
2447 		 *    decision on which reset type to use.
2448 		 * 2. Schedule the reset service task.
2449 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
2450 		 *    will fetch the correct type of reset by first decoding
2451 		 *    the types of errors.
2452 		 */
2453 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2454 		/* fall through */
2455 	case HCLGE_VECTOR0_EVENT_RST:
2456 		hclge_reset_task_schedule(hdev);
2457 		break;
2458 	case HCLGE_VECTOR0_EVENT_MBX:
2459 		/* If we are here then either:
2460 		 * 1. we are not handling any mbx task and we are not
2461 		 *    scheduled to do so,
2462 		 *                        OR
2463 		 * 2. we are handling a mbx task but nothing more is
2464 		 *    scheduled.
2465 		 * In both cases, schedule the mbx task, as this interrupt
2466 		 * reports more mbx messages.
2467 		 */
2468 		hclge_mbx_task_schedule(hdev);
2469 		break;
2470 	default:
2471 		dev_warn(&hdev->pdev->dev,
2472 			 "received unknown or unhandled event of vector0\n");
2473 		break;
2474 	}
2475 
2476 	/* clear the source of interrupt if it is not caused by reset */
2477 	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2478 		hclge_clear_event_cause(hdev, event_cause, clearval);
2479 		hclge_enable_vector(&hdev->misc_vector, true);
2480 	}
2481 
2482 	return IRQ_HANDLED;
2483 }
2484 
2485 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2486 {
2487 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2488 		dev_warn(&hdev->pdev->dev,
2489 			 "vector(vector_id %d) has been freed.\n", vector_id);
2490 		return;
2491 	}
2492 
2493 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2494 	hdev->num_msi_left += 1;
2495 	hdev->num_msi_used -= 1;
2496 }
2497 
2498 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2499 {
2500 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2501 
2502 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2503 
2504 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2505 	hdev->vector_status[0] = 0;
2506 
2507 	hdev->num_msi_left -= 1;
2508 	hdev->num_msi_used += 1;
2509 }
2510 
2511 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2512 {
2513 	int ret;
2514 
2515 	hclge_get_misc_vector(hdev);
2516 
2517 	/* this irq is freed explicitly in hclge_misc_irq_uninit() */
2518 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2519 			  0, "hclge_misc", hdev);
2520 	if (ret) {
2521 		hclge_free_vector(hdev, 0);
2522 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2523 			hdev->misc_vector.vector_irq);
2524 	}
2525 
2526 	return ret;
2527 }
2528 
2529 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2530 {
2531 	free_irq(hdev->misc_vector.vector_irq, hdev);
2532 	hclge_free_vector(hdev, 0);
2533 }
2534 
2535 int hclge_notify_client(struct hclge_dev *hdev,
2536 			enum hnae3_reset_notify_type type)
2537 {
2538 	struct hnae3_client *client = hdev->nic_client;
2539 	u16 i;
2540 
2541 	if (!client->ops->reset_notify)
2542 		return -EOPNOTSUPP;
2543 
2544 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2545 		struct hnae3_handle *handle = &hdev->vport[i].nic;
2546 		int ret;
2547 
2548 		ret = client->ops->reset_notify(handle, type);
2549 		if (ret) {
2550 			dev_err(&hdev->pdev->dev,
2551 				"notify nic client failed %d(%d)\n", type, ret);
2552 			return ret;
2553 		}
2554 	}
2555 
2556 	return 0;
2557 }
2558 
2559 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2560 				    enum hnae3_reset_notify_type type)
2561 {
2562 	struct hnae3_client *client = hdev->roce_client;
2563 	int ret = 0;
2564 	u16 i;
2565 
2566 	if (!client)
2567 		return 0;
2568 
2569 	if (!client->ops->reset_notify)
2570 		return -EOPNOTSUPP;
2571 
2572 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2573 		struct hnae3_handle *handle = &hdev->vport[i].roce;
2574 
2575 		ret = client->ops->reset_notify(handle, type);
2576 		if (ret) {
2577 			dev_err(&hdev->pdev->dev,
2578 				"notify roce client failed %d(%d)\n",
2579 				type, ret);
2580 			return ret;
2581 		}
2582 	}
2583 
2584 	return ret;
2585 }
2586 
2587 static int hclge_reset_wait(struct hclge_dev *hdev)
2588 {
2589 #define HCLGE_RESET_WATI_MS	100
2590 #define HCLGE_RESET_WAIT_CNT	200
2591 	u32 val, reg, reg_bit;
2592 	u32 cnt = 0;
2593 
2594 	switch (hdev->reset_type) {
2595 	case HNAE3_IMP_RESET:
2596 		reg = HCLGE_GLOBAL_RESET_REG;
2597 		reg_bit = HCLGE_IMP_RESET_BIT;
2598 		break;
2599 	case HNAE3_GLOBAL_RESET:
2600 		reg = HCLGE_GLOBAL_RESET_REG;
2601 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
2602 		break;
2603 	case HNAE3_CORE_RESET:
2604 		reg = HCLGE_GLOBAL_RESET_REG;
2605 		reg_bit = HCLGE_CORE_RESET_BIT;
2606 		break;
2607 	case HNAE3_FUNC_RESET:
2608 		reg = HCLGE_FUN_RST_ING;
2609 		reg_bit = HCLGE_FUN_RST_ING_B;
2610 		break;
2611 	case HNAE3_FLR_RESET:
2612 		break;
2613 	default:
2614 		dev_err(&hdev->pdev->dev,
2615 			"Wait for unsupported reset type: %d\n",
2616 			hdev->reset_type);
2617 		return -EINVAL;
2618 	}
2619 
2620 	if (hdev->reset_type == HNAE3_FLR_RESET) {
2621 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2622 		       cnt++ < HCLGE_RESET_WAIT_CNT)
2623 			msleep(HCLGE_RESET_WATI_MS);
2624 
2625 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2626 			dev_err(&hdev->pdev->dev,
2627 				"flr wait timeout: %d\n", cnt);
2628 			return -EBUSY;
2629 		}
2630 
2631 		return 0;
2632 	}
2633 
2634 	val = hclge_read_dev(&hdev->hw, reg);
2635 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2636 		msleep(HCLGE_RESET_WATI_MS);
2637 		val = hclge_read_dev(&hdev->hw, reg);
2638 		cnt++;
2639 	}
2640 
2641 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
2642 		dev_warn(&hdev->pdev->dev,
2643 			 "Wait for reset timeout: %d\n", hdev->reset_type);
2644 		return -EBUSY;
2645 	}
2646 
2647 	return 0;
2648 }
2649 
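/* set or clear the FUNC_RST_ING status of the VF identified by @func_id */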
2650 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2651 {
2652 	struct hclge_vf_rst_cmd *req;
2653 	struct hclge_desc desc;
2654 
2655 	req = (struct hclge_vf_rst_cmd *)desc.data;
2656 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2657 	req->dest_vfid = func_id;
2658 
2659 	if (reset)
2660 		req->vf_rst = 0x1;
2661 
2662 	return hclge_cmd_send(&hdev->hw, &desc, 1);
2663 }
2664 
2665 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2666 {
2667 	int i;
2668 
2669 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2670 		struct hclge_vport *vport = &hdev->vport[i];
2671 		int ret;
2672 
2673 		/* Send cmd to set/clear VF's FUNC_RST_ING */
2674 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2675 		if (ret) {
2676 			dev_err(&hdev->pdev->dev,
2677 				"set vf(%d) rst failed %d!\n",
2678 				vport->vport_id, ret);
2679 			return ret;
2680 		}
2681 
2682 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2683 			continue;
2684 
2685 		/* Inform VF to process the reset.
2686 		 * hclge_inform_reset_assert_to_vf may fail if VF
2687 		 * driver is not loaded.
2688 		 */
2689 		ret = hclge_inform_reset_assert_to_vf(vport);
2690 		if (ret)
2691 			dev_warn(&hdev->pdev->dev,
2692 				 "inform reset to vf(%d) failed %d!\n",
2693 				 vport->vport_id, ret);
2694 	}
2695 
2696 	return 0;
2697 }
2698 
2699 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2700 {
2701 	struct hclge_desc desc;
2702 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2703 	int ret;
2704 
2705 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2706 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2707 	req->fun_reset_vfid = func_id;
2708 
2709 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2710 	if (ret)
2711 		dev_err(&hdev->pdev->dev,
2712 			"send function reset cmd fail, status = %d\n", ret);
2713 
2714 	return ret;
2715 }
2716 
2717 static void hclge_do_reset(struct hclge_dev *hdev)
2718 {
2719 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2720 	struct pci_dev *pdev = hdev->pdev;
2721 	u32 val;
2722 
2723 	if (hclge_get_hw_reset_stat(handle)) {
2724 		dev_info(&pdev->dev, "Hardware reset not finished\n");
2725 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
2726 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
2727 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
2728 		return;
2729 	}
2730 
2731 	switch (hdev->reset_type) {
2732 	case HNAE3_GLOBAL_RESET:
2733 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2734 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2735 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2736 		dev_info(&pdev->dev, "Global Reset requested\n");
2737 		break;
2738 	case HNAE3_CORE_RESET:
2739 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2740 		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2741 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2742 		dev_info(&pdev->dev, "Core Reset requested\n");
2743 		break;
2744 	case HNAE3_FUNC_RESET:
2745 		dev_info(&pdev->dev, "PF Reset requested\n");
2746 		/* schedule again to check later */
2747 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2748 		hclge_reset_task_schedule(hdev);
2749 		break;
2750 	case HNAE3_FLR_RESET:
2751 		dev_info(&pdev->dev, "FLR requested\n");
2752 		/* schedule again to check later */
2753 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2754 		hclge_reset_task_schedule(hdev);
2755 		break;
2756 	default:
2757 		dev_warn(&pdev->dev,
2758 			 "Unsupported reset type: %d\n", hdev->reset_type);
2759 		break;
2760 	}
2761 }
2762 
2763 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2764 						   unsigned long *addr)
2765 {
2766 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2767 
2768 	/* first, resolve any unknown reset type to the known type(s) */
2769 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2770 		/* we intentionally ignore any errors from this function,
2771 		 * as we will end up with *some* reset request in any case
2772 		 */
2773 		hclge_handle_hw_msix_error(hdev, addr);
2774 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
2775 		/* We deferred the clearing of the error event which caused
2776 		 * the interrupt since it was not possible to do that in
2777 		 * interrupt context (this is the reason the new UNKNOWN
2778 		 * reset type was introduced). Now that the errors have been
2779 		 * handled and cleared in hardware, we can safely re-enable
2780 		 * interrupts. This is an exception to the norm.
2781 		 */
2782 		hclge_enable_vector(&hdev->misc_vector, true);
2783 	}
2784 
2785 	/* return the highest priority reset level amongst all */
2786 	if (test_bit(HNAE3_IMP_RESET, addr)) {
2787 		rst_level = HNAE3_IMP_RESET;
2788 		clear_bit(HNAE3_IMP_RESET, addr);
2789 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2790 		clear_bit(HNAE3_CORE_RESET, addr);
2791 		clear_bit(HNAE3_FUNC_RESET, addr);
2792 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2793 		rst_level = HNAE3_GLOBAL_RESET;
2794 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2795 		clear_bit(HNAE3_CORE_RESET, addr);
2796 		clear_bit(HNAE3_FUNC_RESET, addr);
2797 	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
2798 		rst_level = HNAE3_CORE_RESET;
2799 		clear_bit(HNAE3_CORE_RESET, addr);
2800 		clear_bit(HNAE3_FUNC_RESET, addr);
2801 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2802 		rst_level = HNAE3_FUNC_RESET;
2803 		clear_bit(HNAE3_FUNC_RESET, addr);
2804 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
2805 		rst_level = HNAE3_FLR_RESET;
2806 		clear_bit(HNAE3_FLR_RESET, addr);
2807 	}
2808 
2809 	if (hdev->reset_type != HNAE3_NONE_RESET &&
2810 	    rst_level < hdev->reset_type)
2811 		return HNAE3_NONE_RESET;
2812 
2813 	return rst_level;
2814 }
2815 
2816 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2817 {
2818 	u32 clearval = 0;
2819 
2820 	switch (hdev->reset_type) {
2821 	case HNAE3_IMP_RESET:
2822 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2823 		break;
2824 	case HNAE3_GLOBAL_RESET:
2825 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2826 		break;
2827 	case HNAE3_CORE_RESET:
2828 		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2829 		break;
2830 	default:
2831 		break;
2832 	}
2833 
2834 	if (!clearval)
2835 		return;
2836 
2837 	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2838 	hclge_enable_vector(&hdev->misc_vector, true);
2839 }
2840 
2841 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2842 {
2843 	int ret = 0;
2844 
2845 	switch (hdev->reset_type) {
2846 	case HNAE3_FUNC_RESET:
2847 		/* fall through */
2848 	case HNAE3_FLR_RESET:
2849 		ret = hclge_set_all_vf_rst(hdev, true);
2850 		break;
2851 	default:
2852 		break;
2853 	}
2854 
2855 	return ret;
2856 }
2857 
2858 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2859 {
2860 	u32 reg_val;
2861 	int ret = 0;
2862 
2863 	switch (hdev->reset_type) {
2864 	case HNAE3_FUNC_RESET:
2865 		/* There is no mechanism for the PF to know if the VF has
2866 		 * stopped IO; for now, just wait 100 ms for the VF to stop IO
2867 		 */
2868 		msleep(100);
2869 		ret = hclge_func_reset_cmd(hdev, 0);
2870 		if (ret) {
2871 			dev_err(&hdev->pdev->dev,
2872 				"asserting function reset fail %d!\n", ret);
2873 			return ret;
2874 		}
2875 
2876 		/* After performing PF reset, it is not necessary to do the
2877 		 * mailbox handling or send any command to firmware, because
2878 		 * any mailbox handling or command to firmware is only valid
2879 		 * after hclge_cmd_init is called.
2880 		 */
2881 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2882 		break;
2883 	case HNAE3_FLR_RESET:
2884 		/* There is no mechanism for the PF to know if the VF has
2885 		 * stopped IO; for now, just wait 100 ms for the VF to stop IO
2886 		 */
2887 		msleep(100);
2888 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2889 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2890 		break;
2891 	case HNAE3_IMP_RESET:
2892 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2893 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2894 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2895 		break;
2896 	default:
2897 		break;
2898 	}
2899 
2900 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2901 
2902 	return ret;
2903 }
2904 
2905 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2906 {
2907 #define MAX_RESET_FAIL_CNT 5
2908 #define RESET_UPGRADE_DELAY_SEC 10
2909 
2910 	if (hdev->reset_pending) {
2911 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2912 			 hdev->reset_pending);
2913 		return true;
2914 	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2915 		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2916 		    BIT(HCLGE_IMP_RESET_BIT))) {
2917 		dev_info(&hdev->pdev->dev,
2918 			 "reset failed because IMP Reset is pending\n");
2919 		hclge_clear_reset_cause(hdev);
2920 		return false;
2921 	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2922 		hdev->reset_fail_cnt++;
2923 		if (is_timeout) {
2924 			set_bit(hdev->reset_type, &hdev->reset_pending);
2925 			dev_info(&hdev->pdev->dev,
2926 				 "re-schedule to wait for hw reset done\n");
2927 			return true;
2928 		}
2929 
2930 		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2931 		hclge_clear_reset_cause(hdev);
2932 		mod_timer(&hdev->reset_timer,
2933 			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2934 
2935 		return false;
2936 	}
2937 
2938 	hclge_clear_reset_cause(hdev);
2939 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
2940 	return false;
2941 }
2942 
2943 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2944 {
2945 	int ret = 0;
2946 
2947 	switch (hdev->reset_type) {
2948 	case HNAE3_FUNC_RESET:
2949 		/* fall through */
2950 	case HNAE3_FLR_RESET:
2951 		ret = hclge_set_all_vf_rst(hdev, false);
2952 		break;
2953 	default:
2954 		break;
2955 	}
2956 
2957 	return ret;
2958 }
2959 
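/* hclge_reset: perform the reset selected in hdev->reset_type
 * The flow is: notify the clients to go down, prepare and assert the reset,
 * wait for the hardware to finish, re-initialize the ae device, then notify
 * the clients to re-init and come back up. On any failure the error handler
 * decides whether to reschedule the reset.
 */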
2960 static void hclge_reset(struct hclge_dev *hdev)
2961 {
2962 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2963 	bool is_timeout = false;
2964 	int ret;
2965 
2966 	/* Initialize ae_dev reset status as well, in case enet layer wants to
2967 	 * know if device is undergoing reset
2968 	 */
2969 	ae_dev->reset_type = hdev->reset_type;
2970 	hdev->reset_count++;
2971 	/* perform reset of the stack & ae device for a client */
2972 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2973 	if (ret)
2974 		goto err_reset;
2975 
2976 	ret = hclge_reset_prepare_down(hdev);
2977 	if (ret)
2978 		goto err_reset;
2979 
2980 	rtnl_lock();
2981 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2982 	if (ret)
2983 		goto err_reset_lock;
2984 
2985 	rtnl_unlock();
2986 
2987 	ret = hclge_reset_prepare_wait(hdev);
2988 	if (ret)
2989 		goto err_reset;
2990 
2991 	if (hclge_reset_wait(hdev)) {
2992 		is_timeout = true;
2993 		goto err_reset;
2994 	}
2995 
2996 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2997 	if (ret)
2998 		goto err_reset;
2999 
3000 	rtnl_lock();
3001 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3002 	if (ret)
3003 		goto err_reset_lock;
3004 
3005 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3006 	if (ret)
3007 		goto err_reset_lock;
3008 
3009 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3010 	if (ret)
3011 		goto err_reset_lock;
3012 
3013 	ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3014 	if (ret)
3015 		goto err_reset_lock;
3016 
3017 	hclge_clear_reset_cause(hdev);
3018 
3019 	ret = hclge_reset_prepare_up(hdev);
3020 	if (ret)
3021 		goto err_reset_lock;
3022 
3023 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3024 	if (ret)
3025 		goto err_reset_lock;
3026 
3027 	rtnl_unlock();
3028 
3029 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3030 	if (ret)
3031 		goto err_reset;
3032 
3033 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3034 	if (ret)
3035 		goto err_reset;
3036 
3037 	hdev->last_reset_time = jiffies;
3038 	hdev->reset_fail_cnt = 0;
3039 	ae_dev->reset_type = HNAE3_NONE_RESET;
3040 	del_timer(&hdev->reset_timer);
3041 
3042 	return;
3043 
3044 err_reset_lock:
3045 	rtnl_unlock();
3046 err_reset:
3047 	if (hclge_reset_err_handle(hdev, is_timeout))
3048 		hclge_reset_task_schedule(hdev);
3049 }
3050 
3051 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3052 {
3053 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3054 	struct hclge_dev *hdev = ae_dev->priv;
3055 
3056 	/* We might end up getting called broadly because of the 2 cases below:
3057 	 * 1. A recoverable error was conveyed through APEI and the only way
3058 	 *    to bring back normalcy is to reset.
3059 	 * 2. A new reset request from the stack due to timeout.
3060 	 *
3061 	 * For the first case, the error event might not have an ae handle
3062 	 * available. Check whether this is a new reset request and we are not
3063 	 * here just because the last reset attempt did not succeed and the
3064 	 * watchdog hit us again. We know this if the last reset request did
3065 	 * not occur very recently (watchdog timer = 5*HZ, so check after a
3066 	 * sufficiently long time, say 4*5*HZ). In case of a new request we
3067 	 * reset the "reset level" to PF reset. And if it is a repeat of the
3068 	 * most recent reset request, we want to make sure the request is
3069 	 * throttled; therefore, it is not allowed again within 3*HZ.
3070 	 */
3071 	if (!handle)
3072 		handle = &hdev->vport[0].nic;
3073 
3074 	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3075 		return;
3076 	else if (hdev->default_reset_request)
3077 		hdev->reset_level =
3078 			hclge_get_reset_level(hdev,
3079 					      &hdev->default_reset_request);
3080 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3081 		hdev->reset_level = HNAE3_FUNC_RESET;
3082 
3083 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3084 		 hdev->reset_level);
3085 
3086 	/* request reset & schedule reset task */
3087 	set_bit(hdev->reset_level, &hdev->reset_request);
3088 	hclge_reset_task_schedule(hdev);
3089 
3090 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3091 		hdev->reset_level++;
3092 }
3093 
3094 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3095 					enum hnae3_reset_type rst_type)
3096 {
3097 	struct hclge_dev *hdev = ae_dev->priv;
3098 
3099 	set_bit(rst_type, &hdev->default_reset_request);
3100 }
3101 
3102 static void hclge_reset_timer(struct timer_list *t)
3103 {
3104 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3105 
3106 	dev_info(&hdev->pdev->dev,
3107 		 "triggering global reset in reset timer\n");
3108 	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3109 	hclge_reset_event(hdev->pdev, NULL);
3110 }
3111 
3112 static void hclge_reset_subtask(struct hclge_dev *hdev)
3113 {
3114 	/* Check if there is any ongoing reset in the hardware. This status can
3115 	 * be checked from reset_pending. If there is one, we need to wait for
3116 	 * the hardware to complete the reset.
3117 	 *    a. If we are able to figure out in reasonable time that the
3118 	 *       hardware has fully reset, we can proceed with the driver and
3119 	 *       client reset.
3120 	 *    b. Otherwise, we can come back later to check this status, so
3121 	 *       reschedule now.
3122 	 */
3123 	hdev->last_reset_time = jiffies;
3124 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3125 	if (hdev->reset_type != HNAE3_NONE_RESET)
3126 		hclge_reset(hdev);
3127 
3128 	/* check if we got any *new* reset requests to be honored */
3129 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3130 	if (hdev->reset_type != HNAE3_NONE_RESET)
3131 		hclge_do_reset(hdev);
3132 
3133 	hdev->reset_type = HNAE3_NONE_RESET;
3134 }
3135 
3136 static void hclge_reset_service_task(struct work_struct *work)
3137 {
3138 	struct hclge_dev *hdev =
3139 		container_of(work, struct hclge_dev, rst_service_task);
3140 
3141 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3142 		return;
3143 
3144 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3145 
3146 	hclge_reset_subtask(hdev);
3147 
3148 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3149 }
3150 
3151 static void hclge_mailbox_service_task(struct work_struct *work)
3152 {
3153 	struct hclge_dev *hdev =
3154 		container_of(work, struct hclge_dev, mbx_service_task);
3155 
3156 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3157 		return;
3158 
3159 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3160 
3161 	hclge_mbx_handler(hdev);
3162 
3163 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3164 }
3165 
3166 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3167 {
3168 	int i;
3169 
3170 	/* start from vport 1; the PF (vport 0) is always alive */
3171 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3172 		struct hclge_vport *vport = &hdev->vport[i];
3173 
3174 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3175 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3176 
3177 		/* If the VF is not alive, reset its MPS to the default value */
3178 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3179 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3180 	}
3181 }
3182 
3183 static void hclge_service_task(struct work_struct *work)
3184 {
3185 	struct hclge_dev *hdev =
3186 		container_of(work, struct hclge_dev, service_task);
3187 
3188 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3189 		hclge_update_stats_for_all(hdev);
3190 		hdev->hw_stats.stats_timer = 0;
3191 	}
3192 
3193 	hclge_update_speed_duplex(hdev);
3194 	hclge_update_link_status(hdev);
3195 	hclge_update_vport_alive(hdev);
3196 	hclge_service_complete(hdev);
3197 }
3198 
3199 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3200 {
3201 	/* VF handle has no client */
3202 	if (!handle->client)
3203 		return container_of(handle, struct hclge_vport, nic);
3204 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3205 		return container_of(handle, struct hclge_vport, roce);
3206 	else
3207 		return container_of(handle, struct hclge_vport, nic);
3208 }
3209 
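/* hclge_get_vector: allocate up to @vector_num unused MSI-X vectors to the
 * vport and fill in @vector_info. Vector 0 is reserved for the misc
 * interrupt, so the search starts from vector 1. Returns the number of
 * vectors actually allocated.
 */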
3210 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3211 			    struct hnae3_vector_info *vector_info)
3212 {
3213 	struct hclge_vport *vport = hclge_get_vport(handle);
3214 	struct hnae3_vector_info *vector = vector_info;
3215 	struct hclge_dev *hdev = vport->back;
3216 	int alloc = 0;
3217 	int i, j;
3218 
3219 	vector_num = min(hdev->num_msi_left, vector_num);
3220 
3221 	for (j = 0; j < vector_num; j++) {
3222 		for (i = 1; i < hdev->num_msi; i++) {
3223 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3224 				vector->vector = pci_irq_vector(hdev->pdev, i);
3225 				vector->io_addr = hdev->hw.io_base +
3226 					HCLGE_VECTOR_REG_BASE +
3227 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3228 					vport->vport_id *
3229 					HCLGE_VECTOR_VF_OFFSET;
3230 				hdev->vector_status[i] = vport->vport_id;
3231 				hdev->vector_irq[i] = vector->vector;
3232 
3233 				vector++;
3234 				alloc++;
3235 
3236 				break;
3237 			}
3238 		}
3239 	}
3240 	hdev->num_msi_left -= alloc;
3241 	hdev->num_msi_used += alloc;
3242 
3243 	return alloc;
3244 }
3245 
3246 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3247 {
3248 	int i;
3249 
3250 	for (i = 0; i < hdev->num_msi; i++)
3251 		if (vector == hdev->vector_irq[i])
3252 			return i;
3253 
3254 	return -EINVAL;
3255 }
3256 
3257 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3258 {
3259 	struct hclge_vport *vport = hclge_get_vport(handle);
3260 	struct hclge_dev *hdev = vport->back;
3261 	int vector_id;
3262 
3263 	vector_id = hclge_get_vector_index(hdev, vector);
3264 	if (vector_id < 0) {
3265 		dev_err(&hdev->pdev->dev,
3266 			"Get vector index fail. vector_id =%d\n", vector_id);
3267 		return vector_id;
3268 	}
3269 
3270 	hclge_free_vector(hdev, vector_id);
3271 
3272 	return 0;
3273 }
3274 
3275 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3276 {
3277 	return HCLGE_RSS_KEY_SIZE;
3278 }
3279 
3280 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3281 {
3282 	return HCLGE_RSS_IND_TBL_SIZE;
3283 }
3284 
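/* hclge_set_rss_algo_key: program the RSS hash algorithm and hash key
 * The key is written in three chunks of at most HCLGE_RSS_HASH_KEY_NUM
 * bytes, one HCLGE_OPC_RSS_GENERIC_CONFIG command per chunk.
 */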
3285 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3286 				  const u8 hfunc, const u8 *key)
3287 {
3288 	struct hclge_rss_config_cmd *req;
3289 	struct hclge_desc desc;
3290 	int key_offset;
3291 	int key_size;
3292 	int ret;
3293 
3294 	req = (struct hclge_rss_config_cmd *)desc.data;
3295 
3296 	for (key_offset = 0; key_offset < 3; key_offset++) {
3297 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3298 					   false);
3299 
3300 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3301 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3302 
3303 		if (key_offset == 2)
3304 			key_size =
3305 			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3306 		else
3307 			key_size = HCLGE_RSS_HASH_KEY_NUM;
3308 
3309 		memcpy(req->hash_key,
3310 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3311 
3312 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3313 		if (ret) {
3314 			dev_err(&hdev->pdev->dev,
3315 				"Configure RSS config fail, status = %d\n",
3316 				ret);
3317 			return ret;
3318 		}
3319 	}
3320 	return 0;
3321 }
3322 
3323 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3324 {
3325 	struct hclge_rss_indirection_table_cmd *req;
3326 	struct hclge_desc desc;
3327 	int i, j;
3328 	int ret;
3329 
3330 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3331 
3332 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3333 		hclge_cmd_setup_basic_desc
3334 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3335 
3336 		req->start_table_index =
3337 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3338 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3339 
3340 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3341 			req->rss_result[j] =
3342 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3343 
3344 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3345 		if (ret) {
3346 			dev_err(&hdev->pdev->dev,
3347 				"Configure rss indir table fail, status = %d\n",
3348 				ret);
3349 			return ret;
3350 		}
3351 	}
3352 	return 0;
3353 }
3354 
3355 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3356 				 u16 *tc_size, u16 *tc_offset)
3357 {
3358 	struct hclge_rss_tc_mode_cmd *req;
3359 	struct hclge_desc desc;
3360 	int ret;
3361 	int i;
3362 
3363 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3364 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3365 
3366 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3367 		u16 mode = 0;
3368 
3369 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3370 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3371 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3372 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3373 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3374 
3375 		req->rss_tc_mode[i] = cpu_to_le16(mode);
3376 	}
3377 
3378 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3379 	if (ret)
3380 		dev_err(&hdev->pdev->dev,
3381 			"Configure rss tc mode fail, status = %d\n", ret);
3382 
3383 	return ret;
3384 }
3385 
3386 static void hclge_get_rss_type(struct hclge_vport *vport)
3387 {
3388 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
3389 	    vport->rss_tuple_sets.ipv4_udp_en ||
3390 	    vport->rss_tuple_sets.ipv4_sctp_en ||
3391 	    vport->rss_tuple_sets.ipv6_tcp_en ||
3392 	    vport->rss_tuple_sets.ipv6_udp_en ||
3393 	    vport->rss_tuple_sets.ipv6_sctp_en)
3394 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3395 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3396 		 vport->rss_tuple_sets.ipv6_fragment_en)
3397 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3398 	else
3399 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3400 }
3401 
3402 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3403 {
3404 	struct hclge_rss_input_tuple_cmd *req;
3405 	struct hclge_desc desc;
3406 	int ret;
3407 
3408 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3409 
3410 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3411 
3412 	/* Get the tuple cfg from the PF (vport 0) */
3413 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3414 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3415 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3416 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3417 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3418 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3419 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3420 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3421 	hclge_get_rss_type(&hdev->vport[0]);
3422 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3423 	if (ret)
3424 		dev_err(&hdev->pdev->dev,
3425 			"Configure rss input fail, status = %d\n", ret);
3426 	return ret;
3427 }
3428 
3429 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3430 			 u8 *key, u8 *hfunc)
3431 {
3432 	struct hclge_vport *vport = hclge_get_vport(handle);
3433 	int i;
3434 
3435 	/* Get hash algorithm */
3436 	if (hfunc) {
3437 		switch (vport->rss_algo) {
3438 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3439 			*hfunc = ETH_RSS_HASH_TOP;
3440 			break;
3441 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
3442 			*hfunc = ETH_RSS_HASH_XOR;
3443 			break;
3444 		default:
3445 			*hfunc = ETH_RSS_HASH_UNKNOWN;
3446 			break;
3447 		}
3448 	}
3449 
3450 	/* Get the RSS Key required by the user */
3451 	if (key)
3452 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3453 
3454 	/* Get indirect table */
3455 	if (indir)
3456 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3457 			indir[i] =  vport->rss_indirection_tbl[i];
3458 
3459 	return 0;
3460 }
3461 
3462 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3463 			 const  u8 *key, const  u8 hfunc)
3464 {
3465 	struct hclge_vport *vport = hclge_get_vport(handle);
3466 	struct hclge_dev *hdev = vport->back;
3467 	u8 hash_algo;
3468 	int ret, i;
3469 
3470 	/* Set the RSS hash key if specified by the user */
3471 	if (key) {
3472 		switch (hfunc) {
3473 		case ETH_RSS_HASH_TOP:
3474 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3475 			break;
3476 		case ETH_RSS_HASH_XOR:
3477 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3478 			break;
3479 		case ETH_RSS_HASH_NO_CHANGE:
3480 			hash_algo = vport->rss_algo;
3481 			break;
3482 		default:
3483 			return -EINVAL;
3484 		}
3485 
3486 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3487 		if (ret)
3488 			return ret;
3489 
3490 		/* Update the shadow RSS key with the user specified key */
3491 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3492 		vport->rss_algo = hash_algo;
3493 	}
3494 
3495 	/* Update the shadow RSS table with user specified qids */
3496 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3497 		vport->rss_indirection_tbl[i] = indir[i];
3498 
3499 	/* Update the hardware */
3500 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3501 }
3502 
3503 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3504 {
3505 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3506 
3507 	if (nfc->data & RXH_L4_B_2_3)
3508 		hash_sets |= HCLGE_D_PORT_BIT;
3509 	else
3510 		hash_sets &= ~HCLGE_D_PORT_BIT;
3511 
3512 	if (nfc->data & RXH_IP_SRC)
3513 		hash_sets |= HCLGE_S_IP_BIT;
3514 	else
3515 		hash_sets &= ~HCLGE_S_IP_BIT;
3516 
3517 	if (nfc->data & RXH_IP_DST)
3518 		hash_sets |= HCLGE_D_IP_BIT;
3519 	else
3520 		hash_sets &= ~HCLGE_D_IP_BIT;
3521 
3522 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3523 		hash_sets |= HCLGE_V_TAG_BIT;
3524 
3525 	return hash_sets;
3526 }
3527 
3528 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3529 			       struct ethtool_rxnfc *nfc)
3530 {
3531 	struct hclge_vport *vport = hclge_get_vport(handle);
3532 	struct hclge_dev *hdev = vport->back;
3533 	struct hclge_rss_input_tuple_cmd *req;
3534 	struct hclge_desc desc;
3535 	u8 tuple_sets;
3536 	int ret;
3537 
3538 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3539 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
3540 		return -EINVAL;
3541 
3542 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3543 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3544 
3545 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3546 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3547 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3548 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3549 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3550 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3551 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3552 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3553 
3554 	tuple_sets = hclge_get_rss_hash_bits(nfc);
3555 	switch (nfc->flow_type) {
3556 	case TCP_V4_FLOW:
3557 		req->ipv4_tcp_en = tuple_sets;
3558 		break;
3559 	case TCP_V6_FLOW:
3560 		req->ipv6_tcp_en = tuple_sets;
3561 		break;
3562 	case UDP_V4_FLOW:
3563 		req->ipv4_udp_en = tuple_sets;
3564 		break;
3565 	case UDP_V6_FLOW:
3566 		req->ipv6_udp_en = tuple_sets;
3567 		break;
3568 	case SCTP_V4_FLOW:
3569 		req->ipv4_sctp_en = tuple_sets;
3570 		break;
3571 	case SCTP_V6_FLOW:
3572 		if ((nfc->data & RXH_L4_B_0_1) ||
3573 		    (nfc->data & RXH_L4_B_2_3))
3574 			return -EINVAL;
3575 
3576 		req->ipv6_sctp_en = tuple_sets;
3577 		break;
3578 	case IPV4_FLOW:
3579 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3580 		break;
3581 	case IPV6_FLOW:
3582 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3583 		break;
3584 	default:
3585 		return -EINVAL;
3586 	}
3587 
3588 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3589 	if (ret) {
3590 		dev_err(&hdev->pdev->dev,
3591 			"Set rss tuple fail, status = %d\n", ret);
3592 		return ret;
3593 	}
3594 
3595 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3596 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3597 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3598 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3599 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3600 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3601 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3602 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3603 	hclge_get_rss_type(vport);
3604 	return 0;
3605 }
3606 
3607 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3608 			       struct ethtool_rxnfc *nfc)
3609 {
3610 	struct hclge_vport *vport = hclge_get_vport(handle);
3611 	u8 tuple_sets;
3612 
3613 	nfc->data = 0;
3614 
3615 	switch (nfc->flow_type) {
3616 	case TCP_V4_FLOW:
3617 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3618 		break;
3619 	case UDP_V4_FLOW:
3620 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3621 		break;
3622 	case TCP_V6_FLOW:
3623 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3624 		break;
3625 	case UDP_V6_FLOW:
3626 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3627 		break;
3628 	case SCTP_V4_FLOW:
3629 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3630 		break;
3631 	case SCTP_V6_FLOW:
3632 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3633 		break;
3634 	case IPV4_FLOW:
3635 	case IPV6_FLOW:
3636 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3637 		break;
3638 	default:
3639 		return -EINVAL;
3640 	}
3641 
3642 	if (!tuple_sets)
3643 		return 0;
3644 
3645 	if (tuple_sets & HCLGE_D_PORT_BIT)
3646 		nfc->data |= RXH_L4_B_2_3;
3647 	if (tuple_sets & HCLGE_S_PORT_BIT)
3648 		nfc->data |= RXH_L4_B_0_1;
3649 	if (tuple_sets & HCLGE_D_IP_BIT)
3650 		nfc->data |= RXH_IP_DST;
3651 	if (tuple_sets & HCLGE_S_IP_BIT)
3652 		nfc->data |= RXH_IP_SRC;
3653 
3654 	return 0;
3655 }
3656 
3657 static int hclge_get_tc_size(struct hnae3_handle *handle)
3658 {
3659 	struct hclge_vport *vport = hclge_get_vport(handle);
3660 	struct hclge_dev *hdev = vport->back;
3661 
3662 	return hdev->rss_size_max;
3663 }
3664 
3665 int hclge_rss_init_hw(struct hclge_dev *hdev)
3666 {
3667 	struct hclge_vport *vport = hdev->vport;
3668 	u8 *rss_indir = vport[0].rss_indirection_tbl;
3669 	u16 rss_size = vport[0].alloc_rss_size;
3670 	u8 *key = vport[0].rss_hash_key;
3671 	u8 hfunc = vport[0].rss_algo;
3672 	u16 tc_offset[HCLGE_MAX_TC_NUM];
3673 	u16 tc_valid[HCLGE_MAX_TC_NUM];
3674 	u16 tc_size[HCLGE_MAX_TC_NUM];
3675 	u16 roundup_size;
3676 	int i, ret;
3677 
3678 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
3679 	if (ret)
3680 		return ret;
3681 
3682 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3683 	if (ret)
3684 		return ret;
3685 
3686 	ret = hclge_set_rss_input_tuple(hdev);
3687 	if (ret)
3688 		return ret;
3689 
3690 	/* Each TC has the same queue size, and the tc_size set to hardware
3691 	 * is the log2 of the roundup power of two of rss_size; the actual
3692 	 * queue size is limited by the indirection table.
3693 	 */
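	/* Worked example (illustrative): with rss_size = 24,
	 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so tc_size is
	 * written as 5 while the indirection table still limits the used
	 * queues to 0..23.
	 */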
3694 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3695 		dev_err(&hdev->pdev->dev,
3696 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
3697 			rss_size);
3698 		return -EINVAL;
3699 	}
3700 
3701 	roundup_size = roundup_pow_of_two(rss_size);
3702 	roundup_size = ilog2(roundup_size);
3703 
3704 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3705 		tc_valid[i] = 0;
3706 
3707 		if (!(hdev->hw_tc_map & BIT(i)))
3708 			continue;
3709 
3710 		tc_valid[i] = 1;
3711 		tc_size[i] = roundup_size;
3712 		tc_offset[i] = rss_size * i;
3713 	}
3714 
3715 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3716 }
3717 
3718 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3719 {
3720 	struct hclge_vport *vport = hdev->vport;
3721 	int i, j;
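	/* Default mapping (illustrative): with alloc_rss_size = 16 each
	 * vport's table becomes 0, 1, ..., 15, 0, 1, ... so the
	 * HCLGE_RSS_IND_TBL_SIZE entries spread traffic evenly across the
	 * vport's RSS queues.
	 */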
3722 
3723 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3724 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3725 			vport[j].rss_indirection_tbl[i] =
3726 				i % vport[j].alloc_rss_size;
3727 	}
3728 }
3729 
3730 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3731 {
3732 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3733 	struct hclge_vport *vport = hdev->vport;
3734 
3735 	if (hdev->pdev->revision >= 0x21)
3736 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3737 
3738 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3739 		vport[i].rss_tuple_sets.ipv4_tcp_en =
3740 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3741 		vport[i].rss_tuple_sets.ipv4_udp_en =
3742 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3743 		vport[i].rss_tuple_sets.ipv4_sctp_en =
3744 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3745 		vport[i].rss_tuple_sets.ipv4_fragment_en =
3746 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3747 		vport[i].rss_tuple_sets.ipv6_tcp_en =
3748 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3749 		vport[i].rss_tuple_sets.ipv6_udp_en =
3750 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3751 		vport[i].rss_tuple_sets.ipv6_sctp_en =
3752 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3753 		vport[i].rss_tuple_sets.ipv6_fragment_en =
3754 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3755 
3756 		vport[i].rss_algo = rss_algo;
3757 
3758 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
3759 		       HCLGE_RSS_KEY_SIZE);
3760 	}
3761 
3762 	hclge_rss_indir_init_cfg(hdev);
3763 }
3764 
3765 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3766 				int vector_id, bool en,
3767 				struct hnae3_ring_chain_node *ring_chain)
3768 {
3769 	struct hclge_dev *hdev = vport->back;
3770 	struct hnae3_ring_chain_node *node;
3771 	struct hclge_desc desc;
3772 	struct hclge_ctrl_vector_chain_cmd *req
3773 		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3774 	enum hclge_cmd_status status;
3775 	enum hclge_opcode_type op;
3776 	u16 tqp_type_and_id;
3777 	int i;
3778 
3779 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3780 	hclge_cmd_setup_basic_desc(&desc, op, false);
3781 	req->int_vector_id = vector_id;
3782 
3783 	i = 0;
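	/* The ring chain is written HCLGE_VECTOR_ELEMENTS_PER_CMD entries per
	 * command descriptor; a partially filled last descriptor is flushed
	 * after the loop.
	 */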
3784 	for (node = ring_chain; node; node = node->next) {
3785 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3786 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
3787 				HCLGE_INT_TYPE_S,
3788 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3789 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3790 				HCLGE_TQP_ID_S, node->tqp_index);
3791 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3792 				HCLGE_INT_GL_IDX_S,
3793 				hnae3_get_field(node->int_gl_idx,
3794 						HNAE3_RING_GL_IDX_M,
3795 						HNAE3_RING_GL_IDX_S));
3796 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3797 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3798 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3799 			req->vfid = vport->vport_id;
3800 
3801 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
3802 			if (status) {
3803 				dev_err(&hdev->pdev->dev,
3804 					"Map TQP fail, status is %d.\n",
3805 					status);
3806 				return -EIO;
3807 			}
3808 			i = 0;
3809 
3810 			hclge_cmd_setup_basic_desc(&desc, op, false);
3813 			req->int_vector_id = vector_id;
3814 		}
3815 	}
3816 
3817 	if (i > 0) {
3818 		req->int_cause_num = i;
3819 		req->vfid = vport->vport_id;
3820 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
3821 		if (status) {
3822 			dev_err(&hdev->pdev->dev,
3823 				"Map TQP fail, status is %d.\n", status);
3824 			return -EIO;
3825 		}
3826 	}
3827 
3828 	return 0;
3829 }
3830 
3831 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3832 				    int vector,
3833 				    struct hnae3_ring_chain_node *ring_chain)
3834 {
3835 	struct hclge_vport *vport = hclge_get_vport(handle);
3836 	struct hclge_dev *hdev = vport->back;
3837 	int vector_id;
3838 
3839 	vector_id = hclge_get_vector_index(hdev, vector);
3840 	if (vector_id < 0) {
3841 		dev_err(&hdev->pdev->dev,
3842 			"Get vector index fail. vector_id =%d\n", vector_id);
3843 		return vector_id;
3844 	}
3845 
3846 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3847 }
3848 
3849 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3850 				       int vector,
3851 				       struct hnae3_ring_chain_node *ring_chain)
3852 {
3853 	struct hclge_vport *vport = hclge_get_vport(handle);
3854 	struct hclge_dev *hdev = vport->back;
3855 	int vector_id, ret;
3856 
3857 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3858 		return 0;
3859 
3860 	vector_id = hclge_get_vector_index(hdev, vector);
3861 	if (vector_id < 0) {
3862 		dev_err(&handle->pdev->dev,
3863 			"Get vector index fail. ret =%d\n", vector_id);
3864 		return vector_id;
3865 	}
3866 
3867 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3868 	if (ret)
3869 		dev_err(&handle->pdev->dev,
3870 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3871 			vector_id,
3872 			ret);
3873 
3874 	return ret;
3875 }
3876 
3877 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3878 			       struct hclge_promisc_param *param)
3879 {
3880 	struct hclge_promisc_cfg_cmd *req;
3881 	struct hclge_desc desc;
3882 	int ret;
3883 
3884 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3885 
3886 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
3887 	req->vf_id = param->vf_id;
3888 
3889 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
3890 	 * on pdev revision 0x20; newer revisions support them. Setting these
3891 	 * two fields does not cause the firmware to return an error on
3892 	 * revision 0x20, so they can always be set here.
3893 	 */
3894 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3895 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3896 
3897 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3898 	if (ret)
3899 		dev_err(&hdev->pdev->dev,
3900 			"Set promisc mode fail, status is %d.\n", ret);
3901 
3902 	return ret;
3903 }
3904 
3905 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3906 			      bool en_mc, bool en_bc, int vport_id)
3907 {
3908 	if (!param)
3909 		return;
3910 
3911 	memset(param, 0, sizeof(struct hclge_promisc_param));
3912 	if (en_uc)
3913 		param->enable = HCLGE_PROMISC_EN_UC;
3914 	if (en_mc)
3915 		param->enable |= HCLGE_PROMISC_EN_MC;
3916 	if (en_bc)
3917 		param->enable |= HCLGE_PROMISC_EN_BC;
3918 	param->vf_id = vport_id;
3919 }
3920 
3921 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3922 				  bool en_mc_pmc)
3923 {
3924 	struct hclge_vport *vport = hclge_get_vport(handle);
3925 	struct hclge_dev *hdev = vport->back;
3926 	struct hclge_promisc_param param;
3927 	bool en_bc_pmc = true;
3928 
3929 	/* For revision 0x20, if broadcast promisc is enabled, the vlan
3930 	 * filter is always bypassed. So broadcast promisc should be
3931 	 * disabled until the user enables promisc mode.
3932 	 */
3933 	if (handle->pdev->revision == 0x20)
3934 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3935 
3936 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3937 				 vport->vport_id);
3938 	return hclge_cmd_set_promisc_mode(hdev, &param);
3939 }
3940 
3941 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3942 {
3943 	struct hclge_get_fd_mode_cmd *req;
3944 	struct hclge_desc desc;
3945 	int ret;
3946 
3947 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3948 
3949 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
3950 
3951 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3952 	if (ret) {
3953 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3954 		return ret;
3955 	}
3956 
3957 	*fd_mode = req->mode;
3958 
3959 	return ret;
3960 }
3961 
3962 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3963 				   u32 *stage1_entry_num,
3964 				   u32 *stage2_entry_num,
3965 				   u16 *stage1_counter_num,
3966 				   u16 *stage2_counter_num)
3967 {
3968 	struct hclge_get_fd_allocation_cmd *req;
3969 	struct hclge_desc desc;
3970 	int ret;
3971 
3972 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3973 
3974 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3975 
3976 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3977 	if (ret) {
3978 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3979 			ret);
3980 		return ret;
3981 	}
3982 
3983 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3984 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3985 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3986 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3987 
3988 	return ret;
3989 }
3990 
3991 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3992 {
3993 	struct hclge_set_fd_key_config_cmd *req;
3994 	struct hclge_fd_key_cfg *stage;
3995 	struct hclge_desc desc;
3996 	int ret;
3997 
3998 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3999 
4000 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4001 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4002 	req->stage = stage_num;
4003 	req->key_select = stage->key_sel;
4004 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4005 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4006 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4007 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4008 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4009 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4010 
4011 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4012 	if (ret)
4013 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4014 
4015 	return ret;
4016 }
4017 
4018 static int hclge_init_fd_config(struct hclge_dev *hdev)
4019 {
4020 #define LOW_2_WORDS		0x03
4021 	struct hclge_fd_key_cfg *key_cfg;
4022 	int ret;
4023 
4024 	if (!hnae3_dev_fd_supported(hdev))
4025 		return 0;
4026 
4027 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4028 	if (ret)
4029 		return ret;
4030 
4031 	switch (hdev->fd_cfg.fd_mode) {
4032 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4033 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4034 		break;
4035 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4036 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4037 		break;
4038 	default:
4039 		dev_err(&hdev->pdev->dev,
4040 			"Unsupported flow director mode %d\n",
4041 			hdev->fd_cfg.fd_mode);
4042 		return -EOPNOTSUPP;
4043 	}
4044 
4045 	hdev->fd_cfg.proto_support =
4046 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4047 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4048 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4049 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4050 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4051 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4052 	key_cfg->outer_sipv6_word_en = 0;
4053 	key_cfg->outer_dipv6_word_en = 0;
4054 
4055 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4056 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4057 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4058 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4059 
4060 	/* With the max 400 bit key, tuples for ether type are also supported */
4061 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4062 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4063 		key_cfg->tuple_active |=
4064 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4065 	}
4066 
4067 	/* roce_type is used to filter out roce frames;
4068 	 * dst_vport is used to limit the rule to a specific vport
4069 	 */
4070 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4071 
4072 	ret = hclge_get_fd_allocation(hdev,
4073 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4074 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4075 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4076 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4077 	if (ret)
4078 		return ret;
4079 
4080 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4081 }
4082 
4083 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4084 				int loc, u8 *key, bool is_add)
4085 {
4086 	struct hclge_fd_tcam_config_1_cmd *req1;
4087 	struct hclge_fd_tcam_config_2_cmd *req2;
4088 	struct hclge_fd_tcam_config_3_cmd *req3;
4089 	struct hclge_desc desc[3];
4090 	int ret;
4091 
4092 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4093 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4094 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4095 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4096 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4097 
4098 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4099 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4100 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4101 
4102 	req1->stage = stage;
4103 	req1->xy_sel = sel_x ? 1 : 0;
4104 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4105 	req1->index = cpu_to_le32(loc);
4106 	req1->entry_vld = sel_x ? is_add : 0;
4107 
4108 	if (key) {
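		/* The TCAM key does not fit in one descriptor, so it is
		 * copied as three consecutive slices into the tcam_data
		 * areas of desc[0], desc[1] and desc[2].
		 */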
4109 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4110 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4111 		       sizeof(req2->tcam_data));
4112 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4113 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4114 	}
4115 
4116 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4117 	if (ret)
4118 		dev_err(&hdev->pdev->dev,
4119 			"config tcam key fail, ret=%d\n",
4120 			ret);
4121 
4122 	return ret;
4123 }
4124 
4125 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4126 			      struct hclge_fd_ad_data *action)
4127 {
4128 	struct hclge_fd_ad_config_cmd *req;
4129 	struct hclge_desc desc;
4130 	u64 ad_data = 0;
4131 	int ret;
4132 
4133 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4134 
4135 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4136 	req->index = cpu_to_le32(loc);
4137 	req->stage = stage;
4138 
4139 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4140 		      action->write_rule_id_to_bd);
4141 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4142 			action->rule_id);
4143 	ad_data <<= 32;
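	/* After the shift, the rule id bits set above occupy the upper
	 * 32 bits of ad_data; the action bits filled in below (drop, queue
	 * id, counter, next stage) occupy the lower 32 bits.
	 */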
4144 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4145 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4146 		      action->forward_to_direct_queue);
4147 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4148 			action->queue_id);
4149 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4150 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4151 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4152 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4153 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4154 			action->counter_id);
4155 
4156 	req->ad_data = cpu_to_le64(ad_data);
4157 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4158 	if (ret)
4159 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4160 
4161 	return ret;
4162 }
4163 
4164 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4165 				   struct hclge_fd_rule *rule)
4166 {
4167 	u16 tmp_x_s, tmp_y_s;
4168 	u32 tmp_x_l, tmp_y_l;
4169 	int i;
4170 
4171 	if (rule->unused_tuple & tuple_bit)
4172 		return true;
4173 
4174 	switch (tuple_bit) {
4175 	case 0:
4176 		return false;
4177 	case BIT(INNER_DST_MAC):
4178 		for (i = 0; i < 6; i++) {
4179 			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4180 			       rule->tuples_mask.dst_mac[i]);
4181 			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4182 			       rule->tuples_mask.dst_mac[i]);
4183 		}
4184 
4185 		return true;
4186 	case BIT(INNER_SRC_MAC):
4187 		for (i = 0; i < 6; i++) {
4188 			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4189 			       rule->tuples_mask.src_mac[i]);
4190 			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4191 			       rule->tuples_mask.src_mac[i]);
4192 		}
4193 
4194 		return true;
4195 	case BIT(INNER_VLAN_TAG_FST):
4196 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4197 		       rule->tuples_mask.vlan_tag1);
4198 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4199 		       rule->tuples_mask.vlan_tag1);
4200 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4201 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4202 
4203 		return true;
4204 	case BIT(INNER_ETH_TYPE):
4205 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4206 		       rule->tuples_mask.ether_proto);
4207 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4208 		       rule->tuples_mask.ether_proto);
4209 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4210 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4211 
4212 		return true;
4213 	case BIT(INNER_IP_TOS):
4214 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4215 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4216 
4217 		return true;
4218 	case BIT(INNER_IP_PROTO):
4219 		calc_x(*key_x, rule->tuples.ip_proto,
4220 		       rule->tuples_mask.ip_proto);
4221 		calc_y(*key_y, rule->tuples.ip_proto,
4222 		       rule->tuples_mask.ip_proto);
4223 
4224 		return true;
4225 	case BIT(INNER_SRC_IP):
4226 		calc_x(tmp_x_l, rule->tuples.src_ip[3],
4227 		       rule->tuples_mask.src_ip[3]);
4228 		calc_y(tmp_y_l, rule->tuples.src_ip[3],
4229 		       rule->tuples_mask.src_ip[3]);
4230 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4231 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4232 
4233 		return true;
4234 	case BIT(INNER_DST_IP):
4235 		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4236 		       rule->tuples_mask.dst_ip[3]);
4237 		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4238 		       rule->tuples_mask.dst_ip[3]);
4239 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4240 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4241 
4242 		return true;
4243 	case BIT(INNER_SRC_PORT):
4244 		calc_x(tmp_x_s, rule->tuples.src_port,
4245 		       rule->tuples_mask.src_port);
4246 		calc_y(tmp_y_s, rule->tuples.src_port,
4247 		       rule->tuples_mask.src_port);
4248 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4249 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4250 
4251 		return true;
4252 	case BIT(INNER_DST_PORT):
4253 		calc_x(tmp_x_s, rule->tuples.dst_port,
4254 		       rule->tuples_mask.dst_port);
4255 		calc_y(tmp_y_s, rule->tuples.dst_port,
4256 		       rule->tuples_mask.dst_port);
4257 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4258 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4259 
4260 		return true;
4261 	default:
4262 		return false;
4263 	}
4264 }
4265 
4266 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4267 				 u8 vf_id, u8 network_port_id)
4268 {
4269 	u32 port_number = 0;
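	/* Two encodings share the port number field: a host port packs
	 * pf_id and vf_id, while a network port packs the physical port id;
	 * HCLGE_PORT_TYPE_B distinguishes the two.
	 */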
4270 
4271 	if (port_type == HOST_PORT) {
4272 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4273 				pf_id);
4274 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4275 				vf_id);
4276 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4277 	} else {
4278 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4279 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4280 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4281 	}
4282 
4283 	return port_number;
4284 }
4285 
4286 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4287 				       __le32 *key_x, __le32 *key_y,
4288 				       struct hclge_fd_rule *rule)
4289 {
4290 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4291 	u8 cur_pos = 0, tuple_size, shift_bits;
4292 	int i;
4293 
4294 	for (i = 0; i < MAX_META_DATA; i++) {
4295 		tuple_size = meta_data_key_info[i].key_length;
4296 		tuple_bit = key_cfg->meta_data_active & BIT(i);
4297 
4298 		switch (tuple_bit) {
4299 		case BIT(ROCE_TYPE):
4300 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4301 			cur_pos += tuple_size;
4302 			break;
4303 		case BIT(DST_VPORT):
4304 			port_number = hclge_get_port_number(HOST_PORT, 0,
4305 							    rule->vf_id, 0);
4306 			hnae3_set_field(meta_data,
4307 					GENMASK(cur_pos + tuple_size, cur_pos),
4308 					cur_pos, port_number);
4309 			cur_pos += tuple_size;
4310 			break;
4311 		default:
4312 			break;
4313 		}
4314 	}
4315 
4316 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4317 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4318 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
4319 
4320 	*key_x = cpu_to_le32(tmp_x << shift_bits);
4321 	*key_y = cpu_to_le32(tmp_y << shift_bits);
4322 }
4323 
4324 /* A complete key is the combination of a meta data key and a tuple key.
4325  * The meta data key is stored in the MSB region, the tuple key is stored
4326  * in the LSB region, and unused bits are filled with 0.
4327  */
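/* Layout sketch (assuming a 400 bit key and a MAX_META_DATA_LENGTH of
 * 32 bits): key bytes 0..45 hold the tuple key built by
 * hclge_fd_convert_tuple() and bytes 46..49 hold the meta data written by
 * hclge_fd_convert_meta_data(); a shorter key shrinks the tuple region
 * accordingly.
 */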
4328 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4329 			    struct hclge_fd_rule *rule)
4330 {
4331 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4332 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4333 	u8 *cur_key_x, *cur_key_y;
4334 	int i, ret, tuple_size;
4335 	u8 meta_data_region;
4336 
4337 	memset(key_x, 0, sizeof(key_x));
4338 	memset(key_y, 0, sizeof(key_y));
4339 	cur_key_x = key_x;
4340 	cur_key_y = key_y;
4341 
4342 	for (i = 0; i < MAX_TUPLE; i++) {
4343 		bool tuple_valid;
4344 		u32 check_tuple;
4345 
4346 		tuple_size = tuple_key_info[i].key_length / 8;
4347 		check_tuple = key_cfg->tuple_active & BIT(i);
4348 
4349 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4350 						     cur_key_y, rule);
4351 		if (tuple_valid) {
4352 			cur_key_x += tuple_size;
4353 			cur_key_y += tuple_size;
4354 		}
4355 	}
4356 
4357 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4358 			MAX_META_DATA_LENGTH / 8;
4359 
4360 	hclge_fd_convert_meta_data(key_cfg,
4361 				   (__le32 *)(key_x + meta_data_region),
4362 				   (__le32 *)(key_y + meta_data_region),
4363 				   rule);
4364 
4365 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4366 				   true);
4367 	if (ret) {
4368 		dev_err(&hdev->pdev->dev,
4369 			"fd key_y config fail, loc=%d, ret=%d\n",
4370 			rule->location, ret);
4371 		return ret;
4372 	}
4373 
4374 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4375 				   true);
4376 	if (ret)
4377 		dev_err(&hdev->pdev->dev,
4378 			"fd key_x config fail, loc=%d, ret=%d\n",
4379 			rule->location, ret);
4380 	return ret;
4381 }
4382 
4383 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4384 			       struct hclge_fd_rule *rule)
4385 {
4386 	struct hclge_fd_ad_data ad_data;
4387 
4388 	ad_data.ad_id = rule->location;
4389 
4390 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4391 		ad_data.drop_packet = true;
4392 		ad_data.forward_to_direct_queue = false;
4393 		ad_data.queue_id = 0;
4394 	} else {
4395 		ad_data.drop_packet = false;
4396 		ad_data.forward_to_direct_queue = true;
4397 		ad_data.queue_id = rule->queue_id;
4398 	}
4399 
4400 	ad_data.use_counter = false;
4401 	ad_data.counter_id = 0;
4402 
4403 	ad_data.use_next_stage = false;
4404 	ad_data.next_input_key = 0;
4405 
4406 	ad_data.write_rule_id_to_bd = true;
4407 	ad_data.rule_id = rule->location;
4408 
4409 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4410 }
4411 
4412 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4413 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
4414 {
4415 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
4416 	struct ethtool_usrip4_spec *usr_ip4_spec;
4417 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
4418 	struct ethtool_usrip6_spec *usr_ip6_spec;
4419 	struct ethhdr *ether_spec;
4420 
4421 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4422 		return -EINVAL;
4423 
4424 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4425 		return -EOPNOTSUPP;
4426 
4427 	if ((fs->flow_type & FLOW_EXT) &&
4428 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4429 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4430 		return -EOPNOTSUPP;
4431 	}
4432 
4433 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4434 	case SCTP_V4_FLOW:
4435 	case TCP_V4_FLOW:
4436 	case UDP_V4_FLOW:
4437 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4438 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4439 
4440 		if (!tcp_ip4_spec->ip4src)
4441 			*unused |= BIT(INNER_SRC_IP);
4442 
4443 		if (!tcp_ip4_spec->ip4dst)
4444 			*unused |= BIT(INNER_DST_IP);
4445 
4446 		if (!tcp_ip4_spec->psrc)
4447 			*unused |= BIT(INNER_SRC_PORT);
4448 
4449 		if (!tcp_ip4_spec->pdst)
4450 			*unused |= BIT(INNER_DST_PORT);
4451 
4452 		if (!tcp_ip4_spec->tos)
4453 			*unused |= BIT(INNER_IP_TOS);
4454 
4455 		break;
4456 	case IP_USER_FLOW:
4457 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4458 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4459 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4460 
4461 		if (!usr_ip4_spec->ip4src)
4462 			*unused |= BIT(INNER_SRC_IP);
4463 
4464 		if (!usr_ip4_spec->ip4dst)
4465 			*unused |= BIT(INNER_DST_IP);
4466 
4467 		if (!usr_ip4_spec->tos)
4468 			*unused |= BIT(INNER_IP_TOS);
4469 
4470 		if (!usr_ip4_spec->proto)
4471 			*unused |= BIT(INNER_IP_PROTO);
4472 
4473 		if (usr_ip4_spec->l4_4_bytes)
4474 			return -EOPNOTSUPP;
4475 
4476 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4477 			return -EOPNOTSUPP;
4478 
4479 		break;
4480 	case SCTP_V6_FLOW:
4481 	case TCP_V6_FLOW:
4482 	case UDP_V6_FLOW:
4483 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4484 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4485 			BIT(INNER_IP_TOS);
4486 
4487 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4488 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4489 			*unused |= BIT(INNER_SRC_IP);
4490 
4491 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4492 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4493 			*unused |= BIT(INNER_DST_IP);
4494 
4495 		if (!tcp_ip6_spec->psrc)
4496 			*unused |= BIT(INNER_SRC_PORT);
4497 
4498 		if (!tcp_ip6_spec->pdst)
4499 			*unused |= BIT(INNER_DST_PORT);
4500 
4501 		if (tcp_ip6_spec->tclass)
4502 			return -EOPNOTSUPP;
4503 
4504 		break;
4505 	case IPV6_USER_FLOW:
4506 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4507 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4508 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4509 			BIT(INNER_DST_PORT);
4510 
4511 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4512 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4513 			*unused |= BIT(INNER_SRC_IP);
4514 
4515 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4516 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4517 			*unused |= BIT(INNER_DST_IP);
4518 
4519 		if (!usr_ip6_spec->l4_proto)
4520 			*unused |= BIT(INNER_IP_PROTO);
4521 
4522 		if (usr_ip6_spec->tclass)
4523 			return -EOPNOTSUPP;
4524 
4525 		if (usr_ip6_spec->l4_4_bytes)
4526 			return -EOPNOTSUPP;
4527 
4528 		break;
4529 	case ETHER_FLOW:
4530 		ether_spec = &fs->h_u.ether_spec;
4531 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4532 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4533 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4534 
4535 		if (is_zero_ether_addr(ether_spec->h_source))
4536 			*unused |= BIT(INNER_SRC_MAC);
4537 
4538 		if (is_zero_ether_addr(ether_spec->h_dest))
4539 			*unused |= BIT(INNER_DST_MAC);
4540 
4541 		if (!ether_spec->h_proto)
4542 			*unused |= BIT(INNER_ETH_TYPE);
4543 
4544 		break;
4545 	default:
4546 		return -EOPNOTSUPP;
4547 	}
4548 
4549 	if ((fs->flow_type & FLOW_EXT)) {
4550 		if (fs->h_ext.vlan_etype)
4551 			return -EOPNOTSUPP;
4552 		if (!fs->h_ext.vlan_tci)
4553 			*unused |= BIT(INNER_VLAN_TAG_FST);
4554 
4555 		if (fs->m_ext.vlan_tci) {
4556 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4557 				return -EINVAL;
4558 		}
4559 	} else {
4560 		*unused |= BIT(INNER_VLAN_TAG_FST);
4561 	}
4562 
4563 	if (fs->flow_type & FLOW_MAC_EXT) {
4564 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4565 			return -EOPNOTSUPP;
4566 
4567 		if (is_zero_ether_addr(fs->h_ext.h_dest))
4568 			*unused |= BIT(INNER_DST_MAC);
4569 		else
4570 			*unused &= ~(BIT(INNER_DST_MAC));
4571 	}
4572 
4573 	return 0;
4574 }
4575 
4576 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4577 {
4578 	struct hclge_fd_rule *rule = NULL;
4579 	struct hlist_node *node2;
4580 
4581 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4582 		if (rule->location >= location)
4583 			break;
4584 	}
4585 
4586 	return rule && rule->location == location;
4587 }
4588 
4589 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4590 				     struct hclge_fd_rule *new_rule,
4591 				     u16 location,
4592 				     bool is_add)
4593 {
4594 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
4595 	struct hlist_node *node2;
4596 
4597 	if (is_add && !new_rule)
4598 		return -EINVAL;
4599 
4600 	hlist_for_each_entry_safe(rule, node2,
4601 				  &hdev->fd_rule_list, rule_node) {
4602 		if (rule->location >= location)
4603 			break;
4604 		parent = rule;
4605 	}
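	/* At this point "rule" is the first entry with location >= the
	 * requested one (if any) and "parent" is the last entry with a
	 * smaller location, so the list stays sorted by location after the
	 * insert below.
	 */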
4606 
4607 	if (rule && rule->location == location) {
4608 		hlist_del(&rule->rule_node);
4609 		kfree(rule);
4610 		hdev->hclge_fd_rule_num--;
4611 
4612 		if (!is_add)
4613 			return 0;
4614 
4615 	} else if (!is_add) {
4616 		dev_err(&hdev->pdev->dev,
4617 			"delete fail, rule %d is inexistent\n",
4618 			location);
4619 		return -EINVAL;
4620 	}
4621 
4622 	INIT_HLIST_NODE(&new_rule->rule_node);
4623 
4624 	if (parent)
4625 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4626 	else
4627 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4628 
4629 	hdev->hclge_fd_rule_num++;
4630 
4631 	return 0;
4632 }
4633 
4634 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4635 			      struct ethtool_rx_flow_spec *fs,
4636 			      struct hclge_fd_rule *rule)
4637 {
4638 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4639 
4640 	switch (flow_type) {
4641 	case SCTP_V4_FLOW:
4642 	case TCP_V4_FLOW:
4643 	case UDP_V4_FLOW:
4644 		rule->tuples.src_ip[3] =
4645 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4646 		rule->tuples_mask.src_ip[3] =
4647 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4648 
4649 		rule->tuples.dst_ip[3] =
4650 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4651 		rule->tuples_mask.dst_ip[3] =
4652 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4653 
4654 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4655 		rule->tuples_mask.src_port =
4656 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4657 
4658 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4659 		rule->tuples_mask.dst_port =
4660 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4661 
4662 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4663 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4664 
4665 		rule->tuples.ether_proto = ETH_P_IP;
4666 		rule->tuples_mask.ether_proto = 0xFFFF;
4667 
4668 		break;
4669 	case IP_USER_FLOW:
4670 		rule->tuples.src_ip[3] =
4671 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4672 		rule->tuples_mask.src_ip[3] =
4673 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4674 
4675 		rule->tuples.dst_ip[3] =
4676 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4677 		rule->tuples_mask.dst_ip[3] =
4678 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4679 
4680 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4681 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4682 
4683 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4684 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4685 
4686 		rule->tuples.ether_proto = ETH_P_IP;
4687 		rule->tuples_mask.ether_proto = 0xFFFF;
4688 
4689 		break;
4690 	case SCTP_V6_FLOW:
4691 	case TCP_V6_FLOW:
4692 	case UDP_V6_FLOW:
4693 		be32_to_cpu_array(rule->tuples.src_ip,
4694 				  fs->h_u.tcp_ip6_spec.ip6src, 4);
4695 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4696 				  fs->m_u.tcp_ip6_spec.ip6src, 4);
4697 
4698 		be32_to_cpu_array(rule->tuples.dst_ip,
4699 				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
4700 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4701 				  fs->m_u.tcp_ip6_spec.ip6dst, 4);
4702 
4703 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4704 		rule->tuples_mask.src_port =
4705 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4706 
4707 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4708 		rule->tuples_mask.dst_port =
4709 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4710 
4711 		rule->tuples.ether_proto = ETH_P_IPV6;
4712 		rule->tuples_mask.ether_proto = 0xFFFF;
4713 
4714 		break;
4715 	case IPV6_USER_FLOW:
4716 		be32_to_cpu_array(rule->tuples.src_ip,
4717 				  fs->h_u.usr_ip6_spec.ip6src, 4);
4718 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4719 				  fs->m_u.usr_ip6_spec.ip6src, 4);
4720 
4721 		be32_to_cpu_array(rule->tuples.dst_ip,
4722 				  fs->h_u.usr_ip6_spec.ip6dst, 4);
4723 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4724 				  fs->m_u.usr_ip6_spec.ip6dst, 4);
4725 
4726 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4727 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4728 
4729 		rule->tuples.ether_proto = ETH_P_IPV6;
4730 		rule->tuples_mask.ether_proto = 0xFFFF;
4731 
4732 		break;
4733 	case ETHER_FLOW:
4734 		ether_addr_copy(rule->tuples.src_mac,
4735 				fs->h_u.ether_spec.h_source);
4736 		ether_addr_copy(rule->tuples_mask.src_mac,
4737 				fs->m_u.ether_spec.h_source);
4738 
4739 		ether_addr_copy(rule->tuples.dst_mac,
4740 				fs->h_u.ether_spec.h_dest);
4741 		ether_addr_copy(rule->tuples_mask.dst_mac,
4742 				fs->m_u.ether_spec.h_dest);
4743 
4744 		rule->tuples.ether_proto =
4745 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
4746 		rule->tuples_mask.ether_proto =
4747 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
4748 
4749 		break;
4750 	default:
4751 		return -EOPNOTSUPP;
4752 	}
4753 
4754 	switch (flow_type) {
4755 	case SCTP_V4_FLOW:
4756 	case SCTP_V6_FLOW:
4757 		rule->tuples.ip_proto = IPPROTO_SCTP;
4758 		rule->tuples_mask.ip_proto = 0xFF;
4759 		break;
4760 	case TCP_V4_FLOW:
4761 	case TCP_V6_FLOW:
4762 		rule->tuples.ip_proto = IPPROTO_TCP;
4763 		rule->tuples_mask.ip_proto = 0xFF;
4764 		break;
4765 	case UDP_V4_FLOW:
4766 	case UDP_V6_FLOW:
4767 		rule->tuples.ip_proto = IPPROTO_UDP;
4768 		rule->tuples_mask.ip_proto = 0xFF;
4769 		break;
4770 	default:
4771 		break;
4772 	}
4773 
4774 	if ((fs->flow_type & FLOW_EXT)) {
4775 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4776 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4777 	}
4778 
4779 	if (fs->flow_type & FLOW_MAC_EXT) {
4780 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4781 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4782 	}
4783 
4784 	return 0;
4785 }
4786 
4787 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4788 			      struct ethtool_rxnfc *cmd)
4789 {
4790 	struct hclge_vport *vport = hclge_get_vport(handle);
4791 	struct hclge_dev *hdev = vport->back;
4792 	u16 dst_vport_id = 0, q_index = 0;
4793 	struct ethtool_rx_flow_spec *fs;
4794 	struct hclge_fd_rule *rule;
4795 	u32 unused = 0;
4796 	u8 action;
4797 	int ret;
4798 
4799 	if (!hnae3_dev_fd_supported(hdev))
4800 		return -EOPNOTSUPP;
4801 
4802 	if (!hdev->fd_en) {
4803 		dev_warn(&hdev->pdev->dev,
4804 			 "Please enable flow director first\n");
4805 		return -EOPNOTSUPP;
4806 	}
4807 
4808 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4809 
4810 	ret = hclge_fd_check_spec(hdev, fs, &unused);
4811 	if (ret) {
4812 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4813 		return ret;
4814 	}
4815 
4816 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4817 		action = HCLGE_FD_ACTION_DROP_PACKET;
4818 	} else {
4819 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4820 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4821 		u16 tqps;
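		/* The upper bits of ring_cookie (from
		 * ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) carry the VF id and the
		 * lower bits the queue id; vf == 0 means the rule targets
		 * the PF itself.
		 */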
4822 
4823 		if (vf > hdev->num_req_vfs) {
4824 			dev_err(&hdev->pdev->dev,
4825 				"Error: vf id (%d) > max vf num (%d)\n",
4826 				vf, hdev->num_req_vfs);
4827 			return -EINVAL;
4828 		}
4829 
4830 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4831 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4832 
4833 		if (ring >= tqps) {
4834 			dev_err(&hdev->pdev->dev,
4835 				"Error: queue id (%d) > max tqp num (%d)\n",
4836 				ring, tqps - 1);
4837 			return -EINVAL;
4838 		}
4839 
4840 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4841 		q_index = ring;
4842 	}
4843 
4844 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4845 	if (!rule)
4846 		return -ENOMEM;
4847 
4848 	ret = hclge_fd_get_tuple(hdev, fs, rule);
4849 	if (ret)
4850 		goto free_rule;
4851 
4852 	rule->flow_type = fs->flow_type;
4853 
4854 	rule->location = fs->location;
4855 	rule->unused_tuple = unused;
4856 	rule->vf_id = dst_vport_id;
4857 	rule->queue_id = q_index;
4858 	rule->action = action;
4859 
4860 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4861 	if (ret)
4862 		goto free_rule;
4863 
4864 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4865 	if (ret)
4866 		goto free_rule;
4867 
4868 	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4869 	if (ret)
4870 		goto free_rule;
4871 
4872 	return ret;
4873 
4874 free_rule:
4875 	kfree(rule);
4876 	return ret;
4877 }
4878 
4879 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4880 			      struct ethtool_rxnfc *cmd)
4881 {
4882 	struct hclge_vport *vport = hclge_get_vport(handle);
4883 	struct hclge_dev *hdev = vport->back;
4884 	struct ethtool_rx_flow_spec *fs;
4885 	int ret;
4886 
4887 	if (!hnae3_dev_fd_supported(hdev))
4888 		return -EOPNOTSUPP;
4889 
4890 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4891 
4892 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4893 		return -EINVAL;
4894 
4895 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
4896 		dev_err(&hdev->pdev->dev,
4897 			"Delete fail, rule %d is inexistent\n",
4898 			fs->location);
4899 		return -ENOENT;
4900 	}
4901 
4902 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4903 				   fs->location, NULL, false);
4904 	if (ret)
4905 		return ret;
4906 
4907 	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4908 					 false);
4909 }
4910 
4911 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4912 				     bool clear_list)
4913 {
4914 	struct hclge_vport *vport = hclge_get_vport(handle);
4915 	struct hclge_dev *hdev = vport->back;
4916 	struct hclge_fd_rule *rule;
4917 	struct hlist_node *node;
4918 
4919 	if (!hnae3_dev_fd_supported(hdev))
4920 		return;
4921 
4922 	if (clear_list) {
4923 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4924 					  rule_node) {
4925 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4926 					     rule->location, NULL, false);
4927 			hlist_del(&rule->rule_node);
4928 			kfree(rule);
4929 			hdev->hclge_fd_rule_num--;
4930 		}
4931 	} else {
4932 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4933 					  rule_node)
4934 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4935 					     rule->location, NULL, false);
4936 	}
4937 }
4938 
4939 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4940 {
4941 	struct hclge_vport *vport = hclge_get_vport(handle);
4942 	struct hclge_dev *hdev = vport->back;
4943 	struct hclge_fd_rule *rule;
4944 	struct hlist_node *node;
4945 	int ret;
4946 
4947 	/* Return ok here, because reset error handling will check this
4948 	 * return value. If an error is returned here, the reset process
4949 	 * will fail.
4950 	 */
4951 	if (!hnae3_dev_fd_supported(hdev))
4952 		return 0;
4953 
4954 	/* if fd is disabled, the rules should not be restored during reset */
4955 	if (!hdev->fd_en)
4956 		return 0;
4957 
4958 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4959 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4960 		if (!ret)
4961 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4962 
4963 		if (ret) {
4964 			dev_warn(&hdev->pdev->dev,
4965 				 "Restore rule %d failed, remove it\n",
4966 				 rule->location);
4967 			hlist_del(&rule->rule_node);
4968 			kfree(rule);
4969 			hdev->hclge_fd_rule_num--;
4970 		}
4971 	}
4972 	return 0;
4973 }
4974 
4975 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4976 				 struct ethtool_rxnfc *cmd)
4977 {
4978 	struct hclge_vport *vport = hclge_get_vport(handle);
4979 	struct hclge_dev *hdev = vport->back;
4980 
4981 	if (!hnae3_dev_fd_supported(hdev))
4982 		return -EOPNOTSUPP;
4983 
4984 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
4985 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4986 
4987 	return 0;
4988 }
4989 
4990 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4991 				  struct ethtool_rxnfc *cmd)
4992 {
4993 	struct hclge_vport *vport = hclge_get_vport(handle);
4994 	struct hclge_fd_rule *rule = NULL;
4995 	struct hclge_dev *hdev = vport->back;
4996 	struct ethtool_rx_flow_spec *fs;
4997 	struct hlist_node *node2;
4998 
4999 	if (!hnae3_dev_fd_supported(hdev))
5000 		return -EOPNOTSUPP;
5001 
5002 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5003 
5004 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5005 		if (rule->location >= fs->location)
5006 			break;
5007 	}
5008 
5009 	if (!rule || fs->location != rule->location)
5010 		return -ENOENT;
5011 
5012 	fs->flow_type = rule->flow_type;
5013 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5014 	case SCTP_V4_FLOW:
5015 	case TCP_V4_FLOW:
5016 	case UDP_V4_FLOW:
5017 		fs->h_u.tcp_ip4_spec.ip4src =
5018 				cpu_to_be32(rule->tuples.src_ip[3]);
5019 		fs->m_u.tcp_ip4_spec.ip4src =
5020 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
5021 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5022 
5023 		fs->h_u.tcp_ip4_spec.ip4dst =
5024 				cpu_to_be32(rule->tuples.dst_ip[3]);
5025 		fs->m_u.tcp_ip4_spec.ip4dst =
5026 				rule->unused_tuple & BIT(INNER_DST_IP) ?
5027 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5028 
5029 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5030 		fs->m_u.tcp_ip4_spec.psrc =
5031 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5032 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5033 
5034 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5035 		fs->m_u.tcp_ip4_spec.pdst =
5036 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5037 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5038 
5039 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5040 		fs->m_u.tcp_ip4_spec.tos =
5041 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5042 				0 : rule->tuples_mask.ip_tos;
5043 
5044 		break;
5045 	case IP_USER_FLOW:
5046 		fs->h_u.usr_ip4_spec.ip4src =
5047 				cpu_to_be32(rule->tuples.src_ip[3]);
5048 		fs->m_u.usr_ip4_spec.ip4src =
5049 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
5050 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5051 
5052 		fs->h_u.usr_ip4_spec.ip4dst =
5053 				cpu_to_be32(rule->tuples.dst_ip[3]);
5054 		fs->m_u.usr_ip4_spec.ip4dst =
5055 				rule->unused_tuple & BIT(INNER_DST_IP) ?
5056 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5057 
5058 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5059 		fs->m_u.usr_ip4_spec.tos =
5060 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5061 				0 : rule->tuples_mask.ip_tos;
5062 
5063 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5064 		fs->m_u.usr_ip4_spec.proto =
5065 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5066 				0 : rule->tuples_mask.ip_proto;
5067 
5068 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5069 
5070 		break;
5071 	case SCTP_V6_FLOW:
5072 	case TCP_V6_FLOW:
5073 	case UDP_V6_FLOW:
5074 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5075 				  rule->tuples.src_ip, 4);
5076 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5077 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5078 		else
5079 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5080 					  rule->tuples_mask.src_ip, 4);
5081 
5082 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5083 				  rule->tuples.dst_ip, 4);
5084 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5085 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5086 		else
5087 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5088 					  rule->tuples_mask.dst_ip, 4);
5089 
5090 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5091 		fs->m_u.tcp_ip6_spec.psrc =
5092 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5093 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5094 
5095 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5096 		fs->m_u.tcp_ip6_spec.pdst =
5097 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5098 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5099 
5100 		break;
5101 	case IPV6_USER_FLOW:
5102 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5103 				  rule->tuples.src_ip, 4);
5104 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5105 			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5106 		else
5107 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5108 					  rule->tuples_mask.src_ip, 4);
5109 
5110 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5111 				  rule->tuples.dst_ip, 4);
5112 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5113 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5114 		else
5115 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5116 					  rule->tuples_mask.dst_ip, 4);
5117 
5118 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5119 		fs->m_u.usr_ip6_spec.l4_proto =
5120 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5121 				0 : rule->tuples_mask.ip_proto;
5122 
5123 		break;
5124 	case ETHER_FLOW:
5125 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5126 				rule->tuples.src_mac);
5127 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5128 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5129 		else
5130 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5131 					rule->tuples_mask.src_mac);
5132 
5133 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5134 				rule->tuples.dst_mac);
5135 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5136 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5137 		else
5138 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5139 					rule->tuples_mask.dst_mac);
5140 
5141 		fs->h_u.ether_spec.h_proto =
5142 				cpu_to_be16(rule->tuples.ether_proto);
5143 		fs->m_u.ether_spec.h_proto =
5144 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5145 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5146 
5147 		break;
5148 	default:
5149 		return -EOPNOTSUPP;
5150 	}
5151 
5152 	if (fs->flow_type & FLOW_EXT) {
5153 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5154 		fs->m_ext.vlan_tci =
5155 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5156 				cpu_to_be16(VLAN_VID_MASK) :
5157 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5158 	}
5159 
5160 	if (fs->flow_type & FLOW_MAC_EXT) {
5161 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5162 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5163 			eth_zero_addr(fs->m_ext.h_dest);
5164 		else
5165 			ether_addr_copy(fs->m_ext.h_dest,
5166 					rule->tuples_mask.dst_mac);
5167 	}
5168 
5169 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5170 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5171 	} else {
5172 		u64 vf_id;
5173 
5174 		fs->ring_cookie = rule->queue_id;
5175 		vf_id = rule->vf_id;
5176 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5177 		fs->ring_cookie |= vf_id;
5178 	}
5179 
5180 	return 0;
5181 }
5182 
5183 static int hclge_get_all_rules(struct hnae3_handle *handle,
5184 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5185 {
5186 	struct hclge_vport *vport = hclge_get_vport(handle);
5187 	struct hclge_dev *hdev = vport->back;
5188 	struct hclge_fd_rule *rule;
5189 	struct hlist_node *node2;
5190 	int cnt = 0;
5191 
5192 	if (!hnae3_dev_fd_supported(hdev))
5193 		return -EOPNOTSUPP;
5194 
5195 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5196 
5197 	hlist_for_each_entry_safe(rule, node2,
5198 				  &hdev->fd_rule_list, rule_node) {
5199 		if (cnt == cmd->rule_cnt)
5200 			return -EMSGSIZE;
5201 
5202 		rule_locs[cnt] = rule->location;
5203 		cnt++;
5204 	}
5205 
5206 	cmd->rule_cnt = cnt;
5207 
5208 	return 0;
5209 }
5210 
5211 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5212 {
5213 	struct hclge_vport *vport = hclge_get_vport(handle);
5214 	struct hclge_dev *hdev = vport->back;
5215 
5216 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5217 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5218 }
5219 
5220 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5221 {
5222 	struct hclge_vport *vport = hclge_get_vport(handle);
5223 	struct hclge_dev *hdev = vport->back;
5224 
5225 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5226 }
5227 
5228 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5229 {
5230 	struct hclge_vport *vport = hclge_get_vport(handle);
5231 	struct hclge_dev *hdev = vport->back;
5232 
5233 	return hdev->reset_count;
5234 }
5235 
5236 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5237 {
5238 	struct hclge_vport *vport = hclge_get_vport(handle);
5239 	struct hclge_dev *hdev = vport->back;
5240 
5241 	hdev->fd_en = enable;
5242 	if (!enable)
5243 		hclge_del_all_fd_entries(handle, false);
5244 	else
5245 		hclge_restore_fd_entries(handle);
5246 }
5247 
5248 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5249 {
5250 	struct hclge_desc desc;
5251 	struct hclge_config_mac_mode_cmd *req =
5252 		(struct hclge_config_mac_mode_cmd *)desc.data;
5253 	u32 loop_en = 0;
5254 	int ret;
5255 
5256 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5257 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5258 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5259 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5260 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5261 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5262 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5263 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5264 	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5265 	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5266 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5267 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5268 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5269 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5270 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5271 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5272 
5273 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5274 	if (ret)
5275 		dev_err(&hdev->pdev->dev,
5276 			"mac enable fail, ret =%d.\n", ret);
5277 }
5278 
5279 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5280 {
5281 	struct hclge_config_mac_mode_cmd *req;
5282 	struct hclge_desc desc;
5283 	u32 loop_en;
5284 	int ret;
5285 
5286 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5287 	/* 1 Read out the MAC mode config at first */
5288 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5289 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5290 	if (ret) {
5291 		dev_err(&hdev->pdev->dev,
5292 			"mac loopback get fail, ret =%d.\n", ret);
5293 		return ret;
5294 	}
5295 
5296 	/* 2 Then setup the loopback flag */
5297 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5298 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5299 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5300 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5301 
5302 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5303 
5304 	/* 3 Config mac work mode with the loopback flag
5305 	 * and its original configuration parameters
5306 	 */
5307 	hclge_cmd_reuse_desc(&desc, false);
5308 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5309 	if (ret)
5310 		dev_err(&hdev->pdev->dev,
5311 			"mac loopback set fail, ret =%d.\n", ret);
5312 	return ret;
5313 }
5314 
5315 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5316 				     enum hnae3_loop loop_mode)
5317 {
5318 #define HCLGE_SERDES_RETRY_MS	10
5319 #define HCLGE_SERDES_RETRY_NUM	100
5320 
5321 #define HCLGE_MAC_LINK_STATUS_MS   20
5322 #define HCLGE_MAC_LINK_STATUS_NUM  10
5323 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5324 #define HCLGE_MAC_LINK_STATUS_UP   1
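
/* With the values above, the serdes state is polled for at most
 * 100 * 10 ms = 1 s and the MAC link status for at most
 * 10 * 20 ms = 200 ms before giving up.
 */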
5325 
5326 	struct hclge_serdes_lb_cmd *req;
5327 	struct hclge_desc desc;
5328 	int mac_link_ret = 0;
5329 	int ret, i = 0;
5330 	u8 loop_mode_b;
5331 
5332 	req = (struct hclge_serdes_lb_cmd *)desc.data;
5333 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5334 
5335 	switch (loop_mode) {
5336 	case HNAE3_LOOP_SERIAL_SERDES:
5337 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5338 		break;
5339 	case HNAE3_LOOP_PARALLEL_SERDES:
5340 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5341 		break;
5342 	default:
5343 		dev_err(&hdev->pdev->dev,
5344 			"unsupported serdes loopback mode %d\n", loop_mode);
5345 		return -ENOTSUPP;
5346 	}
5347 
5348 	if (en) {
5349 		req->enable = loop_mode_b;
5350 		req->mask = loop_mode_b;
5351 		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5352 	} else {
5353 		req->mask = loop_mode_b;
5354 		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5355 	}
5356 
5357 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5358 	if (ret) {
5359 		dev_err(&hdev->pdev->dev,
5360 			"serdes loopback set fail, ret = %d\n", ret);
5361 		return ret;
5362 	}
5363 
5364 	do {
5365 		msleep(HCLGE_SERDES_RETRY_MS);
5366 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5367 					   true);
5368 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5369 		if (ret) {
5370 			dev_err(&hdev->pdev->dev,
5371 				"serdes loopback get fail, ret = %d\n");
5372 			return ret;
5373 		}
5374 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
5375 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5376 
5377 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5378 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5379 		return -EBUSY;
5380 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5381 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5382 		return -EIO;
5383 	}
5384 
5385 	hclge_cfg_mac_mode(hdev, en);
5386 
5387 	i = 0;
5388 	do {
5389 		/* serdes internal loopback, independent of the network cable */
5390 		msleep(HCLGE_MAC_LINK_STATUS_MS);
5391 		ret = hclge_get_mac_link_status(hdev);
5392 		if (ret == mac_link_ret)
5393 			return 0;
5394 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5395 
5396 	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5397 
5398 	return -EBUSY;
5399 }
5400 
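/* Enable or disable a single TQP (task queue pair) for the given stream
 * via the common TQP queue config command.
 */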
5401 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5402 			    int stream_id, bool enable)
5403 {
5404 	struct hclge_desc desc;
5405 	struct hclge_cfg_com_tqp_queue_cmd *req =
5406 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5407 	int ret;
5408 
5409 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5410 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5411 	req->stream_id = cpu_to_le16(stream_id);
5412 	req->enable |= enable << HCLGE_TQP_ENABLE_B;
5413 
5414 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5415 	if (ret)
5416 		dev_err(&hdev->pdev->dev,
5417 			"Tqp enable fail, status =%d.\n", ret);
5418 	return ret;
5419 }
5420 
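/* hnae3 hook: configure the requested loopback mode (app or serdes), then
 * enable or disable all TQPs of the vport accordingly.
 */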
5421 static int hclge_set_loopback(struct hnae3_handle *handle,
5422 			      enum hnae3_loop loop_mode, bool en)
5423 {
5424 	struct hclge_vport *vport = hclge_get_vport(handle);
5425 	struct hnae3_knic_private_info *kinfo;
5426 	struct hclge_dev *hdev = vport->back;
5427 	int i, ret;
5428 
5429 	switch (loop_mode) {
5430 	case HNAE3_LOOP_APP:
5431 		ret = hclge_set_app_loopback(hdev, en);
5432 		break;
5433 	case HNAE3_LOOP_SERIAL_SERDES:
5434 	case HNAE3_LOOP_PARALLEL_SERDES:
5435 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5436 		break;
5437 	default:
5438 		ret = -ENOTSUPP;
5439 		dev_err(&hdev->pdev->dev,
5440 			"loop_mode %d is not supported\n", loop_mode);
5441 		break;
5442 	}
5443 
5444 	if (ret)
5445 		return ret;
5446 
5447 	kinfo = &vport->nic.kinfo;
5448 	for (i = 0; i < kinfo->num_tqps; i++) {
5449 		ret = hclge_tqp_enable(hdev, i, 0, en);
5450 		if (ret)
5451 			return ret;
5452 	}
5453 
5454 	return 0;
5455 }
5456 
5457 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5458 {
5459 	struct hclge_vport *vport = hclge_get_vport(handle);
5460 	struct hnae3_knic_private_info *kinfo;
5461 	struct hnae3_queue *queue;
5462 	struct hclge_tqp *tqp;
5463 	int i;
5464 
5465 	kinfo = &vport->nic.kinfo;
5466 	for (i = 0; i < kinfo->num_tqps; i++) {
5467 		queue = handle->kinfo.tqp[i];
5468 		tqp = container_of(queue, struct hclge_tqp, q);
5469 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5470 	}
5471 }
5472 
5473 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5474 {
5475 	struct hclge_vport *vport = hclge_get_vport(handle);
5476 	struct hclge_dev *hdev = vport->back;
5477 
5478 	if (enable) {
5479 		mod_timer(&hdev->service_timer, jiffies + HZ);
5480 	} else {
5481 		del_timer_sync(&hdev->service_timer);
5482 		cancel_work_sync(&hdev->service_task);
5483 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5484 	}
5485 }
5486 
5487 static int hclge_ae_start(struct hnae3_handle *handle)
5488 {
5489 	struct hclge_vport *vport = hclge_get_vport(handle);
5490 	struct hclge_dev *hdev = vport->back;
5491 
5492 	/* mac enable */
5493 	hclge_cfg_mac_mode(hdev, true);
5494 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5495 	hdev->hw.mac.link = 0;
5496 
5497 	/* reset tqp stats */
5498 	hclge_reset_tqp_stats(handle);
5499 
5500 	hclge_mac_start_phy(hdev);
5501 
5502 	return 0;
5503 }
5504 
5505 static void hclge_ae_stop(struct hnae3_handle *handle)
5506 {
5507 	struct hclge_vport *vport = hclge_get_vport(handle);
5508 	struct hclge_dev *hdev = vport->back;
5509 	int i;
5510 
5511 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
5512 
5513 	/* If it is not a PF reset, the firmware will disable the MAC,
5514 	 * so we only need to stop the PHY here.
5515 	 */
5516 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5517 	    hdev->reset_type != HNAE3_FUNC_RESET) {
5518 		hclge_mac_stop_phy(hdev);
5519 		return;
5520 	}
5521 
5522 	for (i = 0; i < handle->kinfo.num_tqps; i++)
5523 		hclge_reset_tqp(handle, i);
5524 
5525 	/* mac disable */
5526 	hclge_cfg_mac_mode(hdev, false);
5527 
5528 	hclge_mac_stop_phy(hdev);
5529 
5530 	/* reset tqp stats */
5531 	hclge_reset_tqp_stats(handle);
5532 	hclge_update_link_status(hdev);
5533 }
5534 
5535 int hclge_vport_start(struct hclge_vport *vport)
5536 {
5537 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5538 	vport->last_active_jiffies = jiffies;
5539 	return 0;
5540 }
5541 
5542 void hclge_vport_stop(struct hclge_vport *vport)
5543 {
5544 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5545 }
5546 
5547 static int hclge_client_start(struct hnae3_handle *handle)
5548 {
5549 	struct hclge_vport *vport = hclge_get_vport(handle);
5550 
5551 	return hclge_vport_start(vport);
5552 }
5553 
5554 static void hclge_client_stop(struct hnae3_handle *handle)
5555 {
5556 	struct hclge_vport *vport = hclge_get_vport(handle);
5557 
5558 	hclge_vport_stop(vport);
5559 }
5560 
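/* Convert the MAC VLAN table command response code into an errno,
 * depending on whether the operation was an add, remove or lookup.
 */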
5561 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5562 					 u16 cmdq_resp, u8  resp_code,
5563 					 enum hclge_mac_vlan_tbl_opcode op)
5564 {
5565 	struct hclge_dev *hdev = vport->back;
5566 	int return_status = -EIO;
5567 
5568 	if (cmdq_resp) {
5569 		dev_err(&hdev->pdev->dev,
5570 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5571 			cmdq_resp);
5572 		return -EIO;
5573 	}
5574 
5575 	if (op == HCLGE_MAC_VLAN_ADD) {
5576 		if ((!resp_code) || (resp_code == 1)) {
5577 			return_status = 0;
5578 		} else if (resp_code == 2) {
5579 			return_status = -ENOSPC;
5580 			dev_err(&hdev->pdev->dev,
5581 				"add mac addr failed for uc_overflow.\n");
5582 		} else if (resp_code == 3) {
5583 			return_status = -ENOSPC;
5584 			dev_err(&hdev->pdev->dev,
5585 				"add mac addr failed for mc_overflow.\n");
5586 		} else {
5587 			dev_err(&hdev->pdev->dev,
5588 				"add mac addr failed for undefined, code=%d.\n",
5589 				resp_code);
5590 		}
5591 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
5592 		if (!resp_code) {
5593 			return_status = 0;
5594 		} else if (resp_code == 1) {
5595 			return_status = -ENOENT;
5596 			dev_dbg(&hdev->pdev->dev,
5597 				"remove mac addr failed for miss.\n");
5598 		} else {
5599 			dev_err(&hdev->pdev->dev,
5600 				"remove mac addr failed for undefined, code=%d.\n",
5601 				resp_code);
5602 		}
5603 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
5604 		if (!resp_code) {
5605 			return_status = 0;
5606 		} else if (resp_code == 1) {
5607 			return_status = -ENOENT;
5608 			dev_dbg(&hdev->pdev->dev,
5609 				"lookup mac addr failed for miss.\n");
5610 		} else {
5611 			dev_err(&hdev->pdev->dev,
5612 				"lookup mac addr failed for undefined, code=%d.\n",
5613 				resp_code);
5614 		}
5615 	} else {
5616 		return_status = -EINVAL;
5617 		dev_err(&hdev->pdev->dev,
5618 			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5619 			op);
5620 	}
5621 
5622 	return return_status;
5623 }
5624 
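/* Set or clear the bit of the given vfid in the function bitmap carried by
 * the MAC VLAN descriptors: vfid 0-191 live in desc[1], vfid 192-255 in
 * desc[2].
 */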
5625 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5626 {
5627 	int word_num;
5628 	int bit_num;
5629 
5630 	if (vfid > 255 || vfid < 0)
5631 		return -EIO;
5632 
5633 	if (vfid >= 0 && vfid <= 191) {
5634 		word_num = vfid / 32;
5635 		bit_num  = vfid % 32;
5636 		if (clr)
5637 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5638 		else
5639 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5640 	} else {
5641 		word_num = (vfid - 192) / 32;
5642 		bit_num  = vfid % 32;
5643 		if (clr)
5644 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5645 		else
5646 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5647 	}
5648 
5649 	return 0;
5650 }
5651 
5652 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5653 {
5654 #define HCLGE_DESC_NUMBER 3
5655 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5656 	int i, j;
5657 
5658 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5659 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5660 			if (desc[i].data[j])
5661 				return false;
5662 
5663 	return true;
5664 }
5665 
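/* Pack the 6-byte MAC address into the high 32-bit and low 16-bit fields
 * of a MAC VLAN table entry, and mark the entry as multicast if needed.
 */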
5666 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5667 				   const u8 *addr, bool is_mc)
5668 {
5669 	const unsigned char *mac_addr = addr;
5670 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5671 		       (mac_addr[0]) | (mac_addr[1] << 8);
5672 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5673 
5674 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5675 	if (is_mc) {
5676 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5677 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5678 	}
5679 
5680 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5681 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5682 }
5683 
5684 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5685 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
5686 {
5687 	struct hclge_dev *hdev = vport->back;
5688 	struct hclge_desc desc;
5689 	u8 resp_code;
5690 	u16 retval;
5691 	int ret;
5692 
5693 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5694 
5695 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5696 
5697 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5698 	if (ret) {
5699 		dev_err(&hdev->pdev->dev,
5700 			"del mac addr failed for cmd_send, ret =%d.\n",
5701 			ret);
5702 		return ret;
5703 	}
5704 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5705 	retval = le16_to_cpu(desc.retval);
5706 
5707 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5708 					     HCLGE_MAC_VLAN_REMOVE);
5709 }
5710 
5711 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5712 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
5713 				     struct hclge_desc *desc,
5714 				     bool is_mc)
5715 {
5716 	struct hclge_dev *hdev = vport->back;
5717 	u8 resp_code;
5718 	u16 retval;
5719 	int ret;
5720 
5721 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5722 	if (is_mc) {
5723 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5724 		memcpy(desc[0].data,
5725 		       req,
5726 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5727 		hclge_cmd_setup_basic_desc(&desc[1],
5728 					   HCLGE_OPC_MAC_VLAN_ADD,
5729 					   true);
5730 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5731 		hclge_cmd_setup_basic_desc(&desc[2],
5732 					   HCLGE_OPC_MAC_VLAN_ADD,
5733 					   true);
5734 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
5735 	} else {
5736 		memcpy(desc[0].data,
5737 		       req,
5738 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5739 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
5740 	}
5741 	if (ret) {
5742 		dev_err(&hdev->pdev->dev,
5743 			"lookup mac addr failed for cmd_send, ret =%d.\n",
5744 			ret);
5745 		return ret;
5746 	}
5747 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5748 	retval = le16_to_cpu(desc[0].retval);
5749 
5750 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5751 					     HCLGE_MAC_VLAN_LKUP);
5752 }
5753 
5754 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5755 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
5756 				  struct hclge_desc *mc_desc)
5757 {
5758 	struct hclge_dev *hdev = vport->back;
5759 	int cfg_status;
5760 	u8 resp_code;
5761 	u16 retval;
5762 	int ret;
5763 
5764 	if (!mc_desc) {
5765 		struct hclge_desc desc;
5766 
5767 		hclge_cmd_setup_basic_desc(&desc,
5768 					   HCLGE_OPC_MAC_VLAN_ADD,
5769 					   false);
5770 		memcpy(desc.data, req,
5771 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5772 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5773 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5774 		retval = le16_to_cpu(desc.retval);
5775 
5776 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5777 							   resp_code,
5778 							   HCLGE_MAC_VLAN_ADD);
5779 	} else {
5780 		hclge_cmd_reuse_desc(&mc_desc[0], false);
5781 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5782 		hclge_cmd_reuse_desc(&mc_desc[1], false);
5783 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5784 		hclge_cmd_reuse_desc(&mc_desc[2], false);
5785 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5786 		memcpy(mc_desc[0].data, req,
5787 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5788 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5789 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5790 		retval = le16_to_cpu(mc_desc[0].retval);
5791 
5792 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5793 							   resp_code,
5794 							   HCLGE_MAC_VLAN_ADD);
5795 	}
5796 
5797 	if (ret) {
5798 		dev_err(&hdev->pdev->dev,
5799 			"add mac addr failed for cmd_send, ret =%d.\n",
5800 			ret);
5801 		return ret;
5802 	}
5803 
5804 	return cfg_status;
5805 }
5806 
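/* Allocate unicast MAC VLAN (UMV) table space from the firmware and divide
 * it into per-function private quotas plus a shared pool.
 */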
5807 static int hclge_init_umv_space(struct hclge_dev *hdev)
5808 {
5809 	u16 allocated_size = 0;
5810 	int ret;
5811 
5812 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5813 				  true);
5814 	if (ret)
5815 		return ret;
5816 
5817 	if (allocated_size < hdev->wanted_umv_size)
5818 		dev_warn(&hdev->pdev->dev,
5819 			 "Alloc umv space failed, want %d, get %d\n",
5820 			 hdev->wanted_umv_size, allocated_size);
5821 
5822 	mutex_init(&hdev->umv_mutex);
5823 	hdev->max_umv_size = allocated_size;
5824 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5825 	hdev->share_umv_size = hdev->priv_umv_size +
5826 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5827 
5828 	return 0;
5829 }
5830 
5831 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5832 {
5833 	int ret;
5834 
5835 	if (hdev->max_umv_size > 0) {
5836 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5837 					  false);
5838 		if (ret)
5839 			return ret;
5840 		hdev->max_umv_size = 0;
5841 	}
5842 	mutex_destroy(&hdev->umv_mutex);
5843 
5844 	return 0;
5845 }
5846 
5847 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5848 			       u16 *allocated_size, bool is_alloc)
5849 {
5850 	struct hclge_umv_spc_alc_cmd *req;
5851 	struct hclge_desc desc;
5852 	int ret;
5853 
5854 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5855 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5856 	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5857 	req->space_size = cpu_to_le32(space_size);
5858 
5859 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5860 	if (ret) {
5861 		dev_err(&hdev->pdev->dev,
5862 			"%s umv space failed for cmd_send, ret =%d\n",
5863 			is_alloc ? "allocate" : "free", ret);
5864 		return ret;
5865 	}
5866 
5867 	if (is_alloc && allocated_size)
5868 		*allocated_size = le32_to_cpu(desc.data[1]);
5869 
5870 	return 0;
5871 }
5872 
5873 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5874 {
5875 	struct hclge_vport *vport;
5876 	int i;
5877 
5878 	for (i = 0; i < hdev->num_alloc_vport; i++) {
5879 		vport = &hdev->vport[i];
5880 		vport->used_umv_num = 0;
5881 	}
5882 
5883 	mutex_lock(&hdev->umv_mutex);
5884 	hdev->share_umv_size = hdev->priv_umv_size +
5885 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5886 	mutex_unlock(&hdev->umv_mutex);
5887 }
5888 
5889 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5890 {
5891 	struct hclge_dev *hdev = vport->back;
5892 	bool is_full;
5893 
5894 	mutex_lock(&hdev->umv_mutex);
5895 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5896 		   hdev->share_umv_size == 0);
5897 	mutex_unlock(&hdev->umv_mutex);
5898 
5899 	return is_full;
5900 }
5901 
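/* Account for a unicast entry being added to or removed from the vport:
 * entries beyond the private quota are charged to the shared pool.
 */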
5902 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5903 {
5904 	struct hclge_dev *hdev = vport->back;
5905 
5906 	mutex_lock(&hdev->umv_mutex);
5907 	if (is_free) {
5908 		if (vport->used_umv_num > hdev->priv_umv_size)
5909 			hdev->share_umv_size++;
5910 
5911 		if (vport->used_umv_num > 0)
5912 			vport->used_umv_num--;
5913 	} else {
5914 		if (vport->used_umv_num >= hdev->priv_umv_size &&
5915 		    hdev->share_umv_size > 0)
5916 			hdev->share_umv_size--;
5917 		vport->used_umv_num++;
5918 	}
5919 	mutex_unlock(&hdev->umv_mutex);
5920 }
5921 
5922 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5923 			     const unsigned char *addr)
5924 {
5925 	struct hclge_vport *vport = hclge_get_vport(handle);
5926 
5927 	return hclge_add_uc_addr_common(vport, addr);
5928 }
5929 
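/* Add a unicast MAC address for the vport: validate it, look it up in the
 * MAC VLAN table and, if it is absent and UMV space is left, add it and
 * update the UMV accounting. A duplicate entry is reported but is not
 * treated as an error.
 */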
5930 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5931 			     const unsigned char *addr)
5932 {
5933 	struct hclge_dev *hdev = vport->back;
5934 	struct hclge_mac_vlan_tbl_entry_cmd req;
5935 	struct hclge_desc desc;
5936 	u16 egress_port = 0;
5937 	int ret;
5938 
5939 	/* mac addr check */
5940 	if (is_zero_ether_addr(addr) ||
5941 	    is_broadcast_ether_addr(addr) ||
5942 	    is_multicast_ether_addr(addr)) {
5943 		dev_err(&hdev->pdev->dev,
5944 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5945 			 addr,
5946 			 is_zero_ether_addr(addr),
5947 			 is_broadcast_ether_addr(addr),
5948 			 is_multicast_ether_addr(addr));
5949 		return -EINVAL;
5950 	}
5951 
5952 	memset(&req, 0, sizeof(req));
5953 
5954 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5955 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5956 
5957 	req.egress_port = cpu_to_le16(egress_port);
5958 
5959 	hclge_prepare_mac_addr(&req, addr, false);
5960 
5961 	/* Look up the mac address in the mac_vlan table, and add
5962 	 * it if the entry does not exist. Duplicate unicast entries
5963 	 * are not allowed in the mac_vlan table.
5964 	 */
5965 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5966 	if (ret == -ENOENT) {
5967 		if (!hclge_is_umv_space_full(vport)) {
5968 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5969 			if (!ret)
5970 				hclge_update_umv_space(vport, false);
5971 			return ret;
5972 		}
5973 
5974 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5975 			hdev->priv_umv_size);
5976 
5977 		return -ENOSPC;
5978 	}
5979 
5980 	/* check if we just hit the duplicate */
5981 	if (!ret) {
5982 		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
5983 			 vport->vport_id, addr);
5984 		return 0;
5985 	}
5986 
5987 	dev_err(&hdev->pdev->dev,
5988 		"PF failed to add unicast entry(%pM) in the MAC table\n",
5989 		addr);
5990 
5991 	return ret;
5992 }
5993 
5994 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5995 			    const unsigned char *addr)
5996 {
5997 	struct hclge_vport *vport = hclge_get_vport(handle);
5998 
5999 	return hclge_rm_uc_addr_common(vport, addr);
6000 }
6001 
6002 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6003 			    const unsigned char *addr)
6004 {
6005 	struct hclge_dev *hdev = vport->back;
6006 	struct hclge_mac_vlan_tbl_entry_cmd req;
6007 	int ret;
6008 
6009 	/* mac addr check */
6010 	if (is_zero_ether_addr(addr) ||
6011 	    is_broadcast_ether_addr(addr) ||
6012 	    is_multicast_ether_addr(addr)) {
6013 		dev_dbg(&hdev->pdev->dev,
6014 			"Remove mac err! invalid mac:%pM.\n",
6015 			 addr);
6016 		return -EINVAL;
6017 	}
6018 
6019 	memset(&req, 0, sizeof(req));
6020 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6021 	hclge_prepare_mac_addr(&req, addr, false);
6022 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
6023 	if (!ret)
6024 		hclge_update_umv_space(vport, true);
6025 
6026 	return ret;
6027 }
6028 
6029 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6030 			     const unsigned char *addr)
6031 {
6032 	struct hclge_vport *vport = hclge_get_vport(handle);
6033 
6034 	return hclge_add_mc_addr_common(vport, addr);
6035 }
6036 
6037 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6038 			     const unsigned char *addr)
6039 {
6040 	struct hclge_dev *hdev = vport->back;
6041 	struct hclge_mac_vlan_tbl_entry_cmd req;
6042 	struct hclge_desc desc[3];
6043 	int status;
6044 
6045 	/* mac addr check */
6046 	if (!is_multicast_ether_addr(addr)) {
6047 		dev_err(&hdev->pdev->dev,
6048 			"Add mc mac err! invalid mac:%pM.\n",
6049 			 addr);
6050 		return -EINVAL;
6051 	}
6052 	memset(&req, 0, sizeof(req));
6053 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6054 	hclge_prepare_mac_addr(&req, addr, true);
6055 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6056 	if (!status) {
6057 		/* This mac addr exists, update the VFID for it */
6058 		hclge_update_desc_vfid(desc, vport->vport_id, false);
6059 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6060 	} else {
6061 		/* This mac addr does not exist, add a new entry for it */
6062 		memset(desc[0].data, 0, sizeof(desc[0].data));
6063 		memset(desc[1].data, 0, sizeof(desc[0].data));
6064 		memset(desc[2].data, 0, sizeof(desc[0].data));
6065 		hclge_update_desc_vfid(desc, vport->vport_id, false);
6066 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6067 	}
6068 
6069 	if (status == -ENOSPC)
6070 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6071 
6072 	return status;
6073 }
6074 
6075 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6076 			    const unsigned char *addr)
6077 {
6078 	struct hclge_vport *vport = hclge_get_vport(handle);
6079 
6080 	return hclge_rm_mc_addr_common(vport, addr);
6081 }
6082 
6083 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6084 			    const unsigned char *addr)
6085 {
6086 	struct hclge_dev *hdev = vport->back;
6087 	struct hclge_mac_vlan_tbl_entry_cmd req;
6088 	enum hclge_cmd_status status;
6089 	struct hclge_desc desc[3];
6090 
6091 	/* mac addr check */
6092 	if (!is_multicast_ether_addr(addr)) {
6093 		dev_dbg(&hdev->pdev->dev,
6094 			"Remove mc mac err! invalid mac:%pM.\n",
6095 			 addr);
6096 		return -EINVAL;
6097 	}
6098 
6099 	memset(&req, 0, sizeof(req));
6100 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6101 	hclge_prepare_mac_addr(&req, addr, true);
6102 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6103 	if (!status) {
6104 		/* This mac addr exists, remove this handle's VFID from it */
6105 		hclge_update_desc_vfid(desc, vport->vport_id, true);
6106 
6107 		if (hclge_is_all_function_id_zero(desc))
6108 			/* All the vfids are zero, so delete this entry */
6109 			status = hclge_remove_mac_vlan_tbl(vport, &req);
6110 		else
6111 			/* Not all the vfids are zero, update the vfid bitmap */
6112 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6113 
6114 	} else {
6115 		/* This mac address may be in the mta table, but it cannot be
6116 		 * deleted here because an mta entry represents an address
6117 		 * range rather than a specific address. The deletion of all
6118 		 * entries takes effect in update_mta_status, called by
6119 		 * hns3_nic_set_rx_mode.
6120 		 */
6121 		status = 0;
6122 	}
6123 
6124 	return status;
6125 }
6126 
6127 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6128 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
6129 {
6130 	struct hclge_vport_mac_addr_cfg *mac_cfg;
6131 	struct list_head *list;
6132 
6133 	if (!vport->vport_id)
6134 		return;
6135 
6136 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6137 	if (!mac_cfg)
6138 		return;
6139 
6140 	mac_cfg->hd_tbl_status = true;
6141 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6142 
6143 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6144 	       &vport->uc_mac_list : &vport->mc_mac_list;
6145 
6146 	list_add_tail(&mac_cfg->node, list);
6147 }
6148 
6149 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6150 			      bool is_write_tbl,
6151 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
6152 {
6153 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6154 	struct list_head *list;
6155 	bool uc_flag, mc_flag;
6156 
6157 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6158 	       &vport->uc_mac_list : &vport->mc_mac_list;
6159 
6160 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6161 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6162 
6163 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6164 		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6165 			if (uc_flag && mac_cfg->hd_tbl_status)
6166 				hclge_rm_uc_addr_common(vport, mac_addr);
6167 
6168 			if (mc_flag && mac_cfg->hd_tbl_status)
6169 				hclge_rm_mc_addr_common(vport, mac_addr);
6170 
6171 			list_del(&mac_cfg->node);
6172 			kfree(mac_cfg);
6173 			break;
6174 		}
6175 	}
6176 }
6177 
6178 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6179 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
6180 {
6181 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6182 	struct list_head *list;
6183 
6184 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6185 	       &vport->uc_mac_list : &vport->mc_mac_list;
6186 
6187 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6188 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6189 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6190 
6191 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6192 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6193 
6194 		mac_cfg->hd_tbl_status = false;
6195 		if (is_del_list) {
6196 			list_del(&mac_cfg->node);
6197 			kfree(mac_cfg);
6198 		}
6199 	}
6200 }
6201 
6202 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6203 {
6204 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
6205 	struct hclge_vport *vport;
6206 	int i;
6207 
6208 	mutex_lock(&hdev->vport_cfg_mutex);
6209 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6210 		vport = &hdev->vport[i];
6211 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6212 			list_del(&mac->node);
6213 			kfree(mac);
6214 		}
6215 
6216 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6217 			list_del(&mac->node);
6218 			kfree(mac);
6219 		}
6220 	}
6221 	mutex_unlock(&hdev->vport_cfg_mutex);
6222 }
6223 
6224 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6225 					      u16 cmdq_resp, u8 resp_code)
6226 {
6227 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
6228 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
6229 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
6230 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
6231 
6232 	int return_status;
6233 
6234 	if (cmdq_resp) {
6235 		dev_err(&hdev->pdev->dev,
6236 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6237 			cmdq_resp);
6238 		return -EIO;
6239 	}
6240 
6241 	switch (resp_code) {
6242 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
6243 	case HCLGE_ETHERTYPE_ALREADY_ADD:
6244 		return_status = 0;
6245 		break;
6246 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6247 		dev_err(&hdev->pdev->dev,
6248 			"add mac ethertype failed for manager table overflow.\n");
6249 		return_status = -EIO;
6250 		break;
6251 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
6252 		dev_err(&hdev->pdev->dev,
6253 			"add mac ethertype failed for key conflict.\n");
6254 		return_status = -EIO;
6255 		break;
6256 	default:
6257 		dev_err(&hdev->pdev->dev,
6258 			"add mac ethertype failed for undefined, code=%d.\n",
6259 			resp_code);
6260 		return_status = -EIO;
6261 	}
6262 
6263 	return return_status;
6264 }
6265 
6266 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6267 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
6268 {
6269 	struct hclge_desc desc;
6270 	u8 resp_code;
6271 	u16 retval;
6272 	int ret;
6273 
6274 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6275 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6276 
6277 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6278 	if (ret) {
6279 		dev_err(&hdev->pdev->dev,
6280 			"add mac ethertype failed for cmd_send, ret =%d.\n",
6281 			ret);
6282 		return ret;
6283 	}
6284 
6285 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6286 	retval = le16_to_cpu(desc.retval);
6287 
6288 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6289 }
6290 
6291 static int init_mgr_tbl(struct hclge_dev *hdev)
6292 {
6293 	int ret;
6294 	int i;
6295 
6296 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6297 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6298 		if (ret) {
6299 			dev_err(&hdev->pdev->dev,
6300 				"add mac ethertype failed, ret =%d.\n",
6301 				ret);
6302 			return ret;
6303 		}
6304 	}
6305 
6306 	return 0;
6307 }
6308 
6309 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6310 {
6311 	struct hclge_vport *vport = hclge_get_vport(handle);
6312 	struct hclge_dev *hdev = vport->back;
6313 
6314 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
6315 }
6316 
6317 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6318 			      bool is_first)
6319 {
6320 	const unsigned char *new_addr = (const unsigned char *)p;
6321 	struct hclge_vport *vport = hclge_get_vport(handle);
6322 	struct hclge_dev *hdev = vport->back;
6323 	int ret;
6324 
6325 	/* mac addr check */
6326 	if (is_zero_ether_addr(new_addr) ||
6327 	    is_broadcast_ether_addr(new_addr) ||
6328 	    is_multicast_ether_addr(new_addr)) {
6329 		dev_err(&hdev->pdev->dev,
6330 			"Change uc mac err! invalid mac:%pM.\n",
6331 			 new_addr);
6332 		return -EINVAL;
6333 	}
6334 
6335 	if ((!is_first || is_kdump_kernel()) &&
6336 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6337 		dev_warn(&hdev->pdev->dev,
6338 			 "remove old uc mac address fail.\n");
6339 
6340 	ret = hclge_add_uc_addr(handle, new_addr);
6341 	if (ret) {
6342 		dev_err(&hdev->pdev->dev,
6343 			"add uc mac address fail, ret =%d.\n",
6344 			ret);
6345 
6346 		if (!is_first &&
6347 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6348 			dev_err(&hdev->pdev->dev,
6349 				"restore uc mac address fail.\n");
6350 
6351 		return -EIO;
6352 	}
6353 
6354 	ret = hclge_pause_addr_cfg(hdev, new_addr);
6355 	if (ret) {
6356 		dev_err(&hdev->pdev->dev,
6357 			"configure mac pause address fail, ret =%d.\n",
6358 			ret);
6359 		return -EIO;
6360 	}
6361 
6362 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6363 
6364 	return 0;
6365 }
6366 
6367 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6368 			  int cmd)
6369 {
6370 	struct hclge_vport *vport = hclge_get_vport(handle);
6371 	struct hclge_dev *hdev = vport->back;
6372 
6373 	if (!hdev->hw.mac.phydev)
6374 		return -EOPNOTSUPP;
6375 
6376 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6377 }
6378 
6379 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6380 				      u8 fe_type, bool filter_en, u8 vf_id)
6381 {
6382 	struct hclge_vlan_filter_ctrl_cmd *req;
6383 	struct hclge_desc desc;
6384 	int ret;
6385 
6386 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6387 
6388 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6389 	req->vlan_type = vlan_type;
6390 	req->vlan_fe = filter_en ? fe_type : 0;
6391 	req->vf_id = vf_id;
6392 
6393 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6394 	if (ret)
6395 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6396 			ret);
6397 
6398 	return ret;
6399 }
6400 
6401 #define HCLGE_FILTER_TYPE_VF		0
6402 #define HCLGE_FILTER_TYPE_PORT		1
6403 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
6404 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
6405 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
6406 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
6407 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
6408 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
6409 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
6410 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
6411 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
6412 
6413 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6414 {
6415 	struct hclge_vport *vport = hclge_get_vport(handle);
6416 	struct hclge_dev *hdev = vport->back;
6417 
6418 	if (hdev->pdev->revision >= 0x21) {
6419 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6420 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
6421 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6422 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
6423 	} else {
6424 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6425 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6426 					   0);
6427 	}
6428 	if (enable)
6429 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
6430 	else
6431 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6432 }
6433 
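/* Add or remove a VLAN filter entry for one VF. The VF is selected via a
 * bitmap spread over two descriptors; the response code distinguishes
 * success, a full VF vlan table and a missing entry.
 */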
6434 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6435 				    bool is_kill, u16 vlan, u8 qos,
6436 				    __be16 proto)
6437 {
6438 #define HCLGE_MAX_VF_BYTES  16
6439 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
6440 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
6441 	struct hclge_desc desc[2];
6442 	u8 vf_byte_val;
6443 	u8 vf_byte_off;
6444 	int ret;
6445 
6446 	hclge_cmd_setup_basic_desc(&desc[0],
6447 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6448 	hclge_cmd_setup_basic_desc(&desc[1],
6449 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6450 
6451 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6452 
6453 	vf_byte_off = vfid / 8;
6454 	vf_byte_val = 1 << (vfid % 8);
6455 
6456 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6457 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6458 
6459 	req0->vlan_id  = cpu_to_le16(vlan);
6460 	req0->vlan_cfg = is_kill;
6461 
6462 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6463 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6464 	else
6465 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6466 
6467 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
6468 	if (ret) {
6469 		dev_err(&hdev->pdev->dev,
6470 			"Send vf vlan command fail, ret =%d.\n",
6471 			ret);
6472 		return ret;
6473 	}
6474 
6475 	if (!is_kill) {
6476 #define HCLGE_VF_VLAN_NO_ENTRY	2
6477 		if (!req0->resp_code || req0->resp_code == 1)
6478 			return 0;
6479 
6480 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6481 			dev_warn(&hdev->pdev->dev,
6482 				 "vf vlan table is full, vf vlan filter is disabled\n");
6483 			return 0;
6484 		}
6485 
6486 		dev_err(&hdev->pdev->dev,
6487 			"Add vf vlan filter fail, ret =%d.\n",
6488 			req0->resp_code);
6489 	} else {
6490 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
6491 		if (!req0->resp_code)
6492 			return 0;
6493 
6494 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6495 			dev_warn(&hdev->pdev->dev,
6496 				 "vlan %d filter is not in vf vlan table\n",
6497 				 vlan);
6498 			return 0;
6499 		}
6500 
6501 		dev_err(&hdev->pdev->dev,
6502 			"Kill vf vlan filter fail, ret =%d.\n",
6503 			req0->resp_code);
6504 	}
6505 
6506 	return -EIO;
6507 }
6508 
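/* Program the port level VLAN filter: the vlan id is encoded as a
 * 160-entry page offset plus a bit inside that page's bitmap.
 */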
6509 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6510 				      u16 vlan_id, bool is_kill)
6511 {
6512 	struct hclge_vlan_filter_pf_cfg_cmd *req;
6513 	struct hclge_desc desc;
6514 	u8 vlan_offset_byte_val;
6515 	u8 vlan_offset_byte;
6516 	u8 vlan_offset_160;
6517 	int ret;
6518 
6519 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6520 
6521 	vlan_offset_160 = vlan_id / 160;
6522 	vlan_offset_byte = (vlan_id % 160) / 8;
6523 	vlan_offset_byte_val = 1 << (vlan_id % 8);
6524 
6525 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6526 	req->vlan_offset = vlan_offset_160;
6527 	req->vlan_cfg = is_kill;
6528 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6529 
6530 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6531 	if (ret)
6532 		dev_err(&hdev->pdev->dev,
6533 			"port vlan command, send fail, ret =%d.\n", ret);
6534 	return ret;
6535 }
6536 
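/* Apply a VLAN filter change for a vport and track its membership in
 * hdev->vlan_table; the port filter is only touched when the first vport
 * joins or the last vport leaves the VLAN.
 */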
6537 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6538 				    u16 vport_id, u16 vlan_id, u8 qos,
6539 				    bool is_kill)
6540 {
6541 	u16 vport_idx, vport_num = 0;
6542 	int ret;
6543 
6544 	if (is_kill && !vlan_id)
6545 		return 0;
6546 
6547 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6548 				       0, proto);
6549 	if (ret) {
6550 		dev_err(&hdev->pdev->dev,
6551 			"Set %d vport vlan filter config fail, ret =%d.\n",
6552 			vport_id, ret);
6553 		return ret;
6554 	}
6555 
6556 	/* vlan 0 may be added twice when 8021q module is enabled */
6557 	if (!is_kill && !vlan_id &&
6558 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
6559 		return 0;
6560 
6561 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6562 		dev_err(&hdev->pdev->dev,
6563 			"Add port vlan failed, vport %d is already in vlan %d\n",
6564 			vport_id, vlan_id);
6565 		return -EINVAL;
6566 	}
6567 
6568 	if (is_kill &&
6569 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6570 		dev_err(&hdev->pdev->dev,
6571 			"Delete port vlan failed, vport %d is not in vlan %d\n",
6572 			vport_id, vlan_id);
6573 		return -EINVAL;
6574 	}
6575 
6576 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6577 		vport_num++;
6578 
6579 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6580 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6581 						 is_kill);
6582 
6583 	return ret;
6584 }
6585 
6586 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6587 			  u16 vlan_id, bool is_kill)
6588 {
6589 	struct hclge_vport *vport = hclge_get_vport(handle);
6590 	struct hclge_dev *hdev = vport->back;
6591 
6592 	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6593 					0, is_kill);
6594 }
6595 
6596 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6597 				    u16 vlan, u8 qos, __be16 proto)
6598 {
6599 	struct hclge_vport *vport = hclge_get_vport(handle);
6600 	struct hclge_dev *hdev = vport->back;
6601 
6602 	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6603 		return -EINVAL;
6604 	if (proto != htons(ETH_P_8021Q))
6605 		return -EPROTONOSUPPORT;
6606 
6607 	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6608 }
6609 
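/* Push the vport's TX VLAN offload settings (default tags, accept and
 * insert flags) to the firmware for the VF selected by the bitmap.
 */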
6610 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6611 {
6612 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6613 	struct hclge_vport_vtag_tx_cfg_cmd *req;
6614 	struct hclge_dev *hdev = vport->back;
6615 	struct hclge_desc desc;
6616 	int status;
6617 
6618 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6619 
6620 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6621 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6622 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6623 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6624 		      vcfg->accept_tag1 ? 1 : 0);
6625 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6626 		      vcfg->accept_untag1 ? 1 : 0);
6627 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6628 		      vcfg->accept_tag2 ? 1 : 0);
6629 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6630 		      vcfg->accept_untag2 ? 1 : 0);
6631 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6632 		      vcfg->insert_tag1_en ? 1 : 0);
6633 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6634 		      vcfg->insert_tag2_en ? 1 : 0);
6635 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6636 
6637 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6638 	req->vf_bitmap[req->vf_offset] =
6639 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6640 
6641 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6642 	if (status)
6643 		dev_err(&hdev->pdev->dev,
6644 			"Send port txvlan cfg command fail, ret =%d\n",
6645 			status);
6646 
6647 	return status;
6648 }
6649 
6650 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6651 {
6652 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6653 	struct hclge_vport_vtag_rx_cfg_cmd *req;
6654 	struct hclge_dev *hdev = vport->back;
6655 	struct hclge_desc desc;
6656 	int status;
6657 
6658 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6659 
6660 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6661 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6662 		      vcfg->strip_tag1_en ? 1 : 0);
6663 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6664 		      vcfg->strip_tag2_en ? 1 : 0);
6665 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6666 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
6667 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6668 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
6669 
6670 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6671 	req->vf_bitmap[req->vf_offset] =
6672 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6673 
6674 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6675 	if (status)
6676 		dev_err(&hdev->pdev->dev,
6677 			"Send port rxvlan cfg command fail, ret =%d\n",
6678 			status);
6679 
6680 	return status;
6681 }
6682 
6683 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6684 {
6685 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6686 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6687 	struct hclge_desc desc;
6688 	int status;
6689 
6690 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6691 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6692 	rx_req->ot_fst_vlan_type =
6693 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6694 	rx_req->ot_sec_vlan_type =
6695 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6696 	rx_req->in_fst_vlan_type =
6697 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6698 	rx_req->in_sec_vlan_type =
6699 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6700 
6701 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6702 	if (status) {
6703 		dev_err(&hdev->pdev->dev,
6704 			"Send rxvlan protocol type command fail, ret =%d\n",
6705 			status);
6706 		return status;
6707 	}
6708 
6709 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6710 
6711 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6712 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6713 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6714 
6715 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6716 	if (status)
6717 		dev_err(&hdev->pdev->dev,
6718 			"Send txvlan protocol type command fail, ret =%d\n",
6719 			status);
6720 
6721 	return status;
6722 }
6723 
6724 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6725 {
6726 #define HCLGE_DEF_VLAN_TYPE		0x8100
6727 
6728 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6729 	struct hclge_vport *vport;
6730 	int ret;
6731 	int i;
6732 
6733 	if (hdev->pdev->revision >= 0x21) {
6734 		/* for revision 0x21, vf vlan filter is per function */
6735 		for (i = 0; i < hdev->num_alloc_vport; i++) {
6736 			vport = &hdev->vport[i];
6737 			ret = hclge_set_vlan_filter_ctrl(hdev,
6738 							 HCLGE_FILTER_TYPE_VF,
6739 							 HCLGE_FILTER_FE_EGRESS,
6740 							 true,
6741 							 vport->vport_id);
6742 			if (ret)
6743 				return ret;
6744 		}
6745 
6746 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6747 						 HCLGE_FILTER_FE_INGRESS, true,
6748 						 0);
6749 		if (ret)
6750 			return ret;
6751 	} else {
6752 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6753 						 HCLGE_FILTER_FE_EGRESS_V1_B,
6754 						 true, 0);
6755 		if (ret)
6756 			return ret;
6757 	}
6758 
6759 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
6760 
6761 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6762 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6763 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6764 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6765 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6766 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6767 
6768 	ret = hclge_set_vlan_protocol_type(hdev);
6769 	if (ret)
6770 		return ret;
6771 
6772 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6773 		vport = &hdev->vport[i];
6774 		vport->txvlan_cfg.accept_tag1 = true;
6775 		vport->txvlan_cfg.accept_untag1 = true;
6776 
6777 		/* accept_tag2 and accept_untag2 are not supported on
6778 		 * pdev revision(0x20); newer revisions support them. Setting
6779 		 * these two fields does not return an error when the driver
6780 		 * sends the command to the firmware on revision(0x20).
6781 		 * These two fields cannot be configured by the user.
6782 		 */
6783 		vport->txvlan_cfg.accept_tag2 = true;
6784 		vport->txvlan_cfg.accept_untag2 = true;
6785 
6786 		vport->txvlan_cfg.insert_tag1_en = false;
6787 		vport->txvlan_cfg.insert_tag2_en = false;
6788 		vport->txvlan_cfg.default_tag1 = 0;
6789 		vport->txvlan_cfg.default_tag2 = 0;
6790 
6791 		ret = hclge_set_vlan_tx_offload_cfg(vport);
6792 		if (ret)
6793 			return ret;
6794 
6795 		vport->rxvlan_cfg.strip_tag1_en = false;
6796 		vport->rxvlan_cfg.strip_tag2_en = true;
6797 		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6798 		vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6799 
6800 		ret = hclge_set_vlan_rx_offload_cfg(vport);
6801 		if (ret)
6802 			return ret;
6803 	}
6804 
6805 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6806 }
6807 
6808 void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
6809 {
6810 	struct hclge_vport_vlan_cfg *vlan;
6811 
6812 	/* vlan 0 is reserved */
6813 	if (!vlan_id)
6814 		return;
6815 
6816 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
6817 	if (!vlan)
6818 		return;
6819 
6820 	vlan->hd_tbl_status = true;
6821 	vlan->vlan_id = vlan_id;
6822 
6823 	list_add_tail(&vlan->node, &vport->vlan_list);
6824 }
6825 
6826 void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6827 			       bool is_write_tbl)
6828 {
6829 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6830 	struct hclge_dev *hdev = vport->back;
6831 
6832 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6833 		if (vlan->vlan_id == vlan_id) {
6834 			if (is_write_tbl && vlan->hd_tbl_status)
6835 				hclge_set_vlan_filter_hw(hdev,
6836 							 htons(ETH_P_8021Q),
6837 							 vport->vport_id,
6838 							 vlan_id, 0,
6839 							 true);
6840 
6841 			list_del(&vlan->node);
6842 			kfree(vlan);
6843 			break;
6844 		}
6845 	}
6846 }
6847 
6848 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
6849 {
6850 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6851 	struct hclge_dev *hdev = vport->back;
6852 
6853 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6854 		if (vlan->hd_tbl_status)
6855 			hclge_set_vlan_filter_hw(hdev,
6856 						 htons(ETH_P_8021Q),
6857 						 vport->vport_id,
6858 						 vlan->vlan_id, 0,
6859 						 true);
6860 
6861 		vlan->hd_tbl_status = false;
6862 		if (is_del_list) {
6863 			list_del(&vlan->node);
6864 			kfree(vlan);
6865 		}
6866 	}
6867 }
6868 
6869 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
6870 {
6871 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6872 	struct hclge_vport *vport;
6873 	int i;
6874 
6875 	mutex_lock(&hdev->vport_cfg_mutex);
6876 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6877 		vport = &hdev->vport[i];
6878 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6879 			list_del(&vlan->node);
6880 			kfree(vlan);
6881 		}
6882 	}
6883 	mutex_unlock(&hdev->vport_cfg_mutex);
6884 }
6885 
6886 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6887 {
6888 	struct hclge_vport *vport = hclge_get_vport(handle);
6889 
6890 	vport->rxvlan_cfg.strip_tag1_en = false;
6891 	vport->rxvlan_cfg.strip_tag2_en = enable;
6892 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6893 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6894 
6895 	return hclge_set_vlan_rx_offload_cfg(vport);
6896 }
6897 
6898 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6899 {
6900 	struct hclge_config_max_frm_size_cmd *req;
6901 	struct hclge_desc desc;
6902 
6903 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6904 
6905 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6906 	req->max_frm_size = cpu_to_le16(new_mps);
6907 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6908 
6909 	return hclge_cmd_send(&hdev->hw, &desc, 1);
6910 }
6911 
6912 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6913 {
6914 	struct hclge_vport *vport = hclge_get_vport(handle);
6915 
6916 	return hclge_set_vport_mtu(vport, new_mtu);
6917 }
6918 
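/* Change the MTU of a vport. The MTU is converted to a max frame size; a
 * VF only records it (it must not exceed the PF's mps), while the PF also
 * reprograms the MAC frame size and reallocates the packet buffers with
 * the client stopped.
 */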
6919 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6920 {
6921 	struct hclge_dev *hdev = vport->back;
6922 	int i, max_frm_size, ret = 0;
6923 
6924 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6925 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6926 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
6927 		return -EINVAL;
6928 
6929 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6930 	mutex_lock(&hdev->vport_lock);
6931 	/* VF's mps must fit within hdev->mps */
6932 	if (vport->vport_id && max_frm_size > hdev->mps) {
6933 		mutex_unlock(&hdev->vport_lock);
6934 		return -EINVAL;
6935 	} else if (vport->vport_id) {
6936 		vport->mps = max_frm_size;
6937 		mutex_unlock(&hdev->vport_lock);
6938 		return 0;
6939 	}
6940 
6941 	/* PF's mps must be greater than or equal to the VFs' mps */
6942 	for (i = 1; i < hdev->num_alloc_vport; i++)
6943 		if (max_frm_size < hdev->vport[i].mps) {
6944 			mutex_unlock(&hdev->vport_lock);
6945 			return -EINVAL;
6946 		}
6947 
6948 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6949 
6950 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
6951 	if (ret) {
6952 		dev_err(&hdev->pdev->dev,
6953 			"Change mtu fail, ret =%d\n", ret);
6954 		goto out;
6955 	}
6956 
6957 	hdev->mps = max_frm_size;
6958 	vport->mps = max_frm_size;
6959 
6960 	ret = hclge_buffer_alloc(hdev);
6961 	if (ret)
6962 		dev_err(&hdev->pdev->dev,
6963 			"Allocate buffer fail, ret =%d\n", ret);
6964 
6965 out:
6966 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6967 	mutex_unlock(&hdev->vport_lock);
6968 	return ret;
6969 }
6970 
6971 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6972 				    bool enable)
6973 {
6974 	struct hclge_reset_tqp_queue_cmd *req;
6975 	struct hclge_desc desc;
6976 	int ret;
6977 
6978 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6979 
6980 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6981 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6982 	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6983 
6984 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6985 	if (ret) {
6986 		dev_err(&hdev->pdev->dev,
6987 			"Send tqp reset cmd error, status =%d\n", ret);
6988 		return ret;
6989 	}
6990 
6991 	return 0;
6992 }
6993 
6994 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6995 {
6996 	struct hclge_reset_tqp_queue_cmd *req;
6997 	struct hclge_desc desc;
6998 	int ret;
6999 
7000 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7001 
7002 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7003 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7004 
7005 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7006 	if (ret) {
7007 		dev_err(&hdev->pdev->dev,
7008 			"Get reset status error, status =%d\n", ret);
7009 		return ret;
7010 	}
7011 
7012 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7013 }
7014 
7015 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7016 {
7017 	struct hnae3_queue *queue;
7018 	struct hclge_tqp *tqp;
7019 
7020 	queue = handle->kinfo.tqp[queue_id];
7021 	tqp = container_of(queue, struct hclge_tqp, q);
7022 
7023 	return tqp->index;
7024 }
7025 
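/* Reset a single TQP: disable it, request the reset from the firmware,
 * poll the ready-to-reset status and finally deassert the reset.
 */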
7026 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7027 {
7028 	struct hclge_vport *vport = hclge_get_vport(handle);
7029 	struct hclge_dev *hdev = vport->back;
7030 	int reset_try_times = 0;
7031 	int reset_status;
7032 	u16 queue_gid;
7033 	int ret = 0;
7034 
7035 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7036 
7037 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7038 	if (ret) {
7039 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7040 		return ret;
7041 	}
7042 
7043 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7044 	if (ret) {
7045 		dev_err(&hdev->pdev->dev,
7046 			"Send reset tqp cmd fail, ret = %d\n", ret);
7047 		return ret;
7048 	}
7049 
7050 	reset_try_times = 0;
7051 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7052 		/* Wait for tqp hw reset */
7053 		msleep(20);
7054 		reset_status = hclge_get_reset_status(hdev, queue_gid);
7055 		if (reset_status)
7056 			break;
7057 	}
7058 
7059 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7060 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7061 		return -ETIME;
7062 	}
7063 
7064 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7065 	if (ret)
7066 		dev_err(&hdev->pdev->dev,
7067 			"Deassert the soft reset fail, ret = %d\n", ret);
7068 
7069 	return ret;
7070 }
7071 
7072 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7073 {
7074 	struct hclge_dev *hdev = vport->back;
7075 	int reset_try_times = 0;
7076 	int reset_status;
7077 	u16 queue_gid;
7078 	int ret;
7079 
7080 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7081 
7082 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7083 	if (ret) {
7084 		dev_warn(&hdev->pdev->dev,
7085 			 "Send reset tqp cmd fail, ret = %d\n", ret);
7086 		return;
7087 	}
7088 
7089 	reset_try_times = 0;
7090 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7091 		/* Wait for tqp hw reset */
7092 		msleep(20);
7093 		reset_status = hclge_get_reset_status(hdev, queue_gid);
7094 		if (reset_status)
7095 			break;
7096 	}
7097 
7098 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7099 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7100 		return;
7101 	}
7102 
7103 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7104 	if (ret)
7105 		dev_warn(&hdev->pdev->dev,
7106 			 "Deassert the soft reset fail, ret = %d\n", ret);
7107 }
7108 
7109 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7110 {
7111 	struct hclge_vport *vport = hclge_get_vport(handle);
7112 	struct hclge_dev *hdev = vport->back;
7113 
7114 	return hdev->fw_version;
7115 }
7116 
7117 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7118 {
7119 	struct phy_device *phydev = hdev->hw.mac.phydev;
7120 
7121 	if (!phydev)
7122 		return;
7123 
7124 	phy_set_asym_pause(phydev, rx_en, tx_en);
7125 }
7126 
7127 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7128 {
7129 	int ret;
7130 
7131 	if (rx_en && tx_en)
7132 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
7133 	else if (rx_en && !tx_en)
7134 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7135 	else if (!rx_en && tx_en)
7136 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7137 	else
7138 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
7139 
7140 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7141 		return 0;
7142 
7143 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7144 	if (ret) {
7145 		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7146 			ret);
7147 		return ret;
7148 	}
7149 
7150 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7151 
7152 	return 0;
7153 }
7154 
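/* Resolve the pause configuration from the local and link partner
 * advertisements after autoneg and apply it to the MAC; pause is disabled
 * in half duplex.
 */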
7155 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7156 {
7157 	struct phy_device *phydev = hdev->hw.mac.phydev;
7158 	u16 remote_advertising = 0;
7159 	u16 local_advertising = 0;
7160 	u32 rx_pause, tx_pause;
7161 	u8 flowctl;
7162 
7163 	if (!phydev->link || !phydev->autoneg)
7164 		return 0;
7165 
7166 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7167 
7168 	if (phydev->pause)
7169 		remote_advertising = LPA_PAUSE_CAP;
7170 
7171 	if (phydev->asym_pause)
7172 		remote_advertising |= LPA_PAUSE_ASYM;
7173 
7174 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7175 					   remote_advertising);
7176 	tx_pause = flowctl & FLOW_CTRL_TX;
7177 	rx_pause = flowctl & FLOW_CTRL_RX;
7178 
7179 	if (phydev->duplex == HCLGE_MAC_HALF) {
7180 		tx_pause = 0;
7181 		rx_pause = 0;
7182 	}
7183 
7184 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7185 }
7186 
7187 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7188 				 u32 *rx_en, u32 *tx_en)
7189 {
7190 	struct hclge_vport *vport = hclge_get_vport(handle);
7191 	struct hclge_dev *hdev = vport->back;
7192 
7193 	*auto_neg = hclge_get_autoneg(handle);
7194 
7195 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7196 		*rx_en = 0;
7197 		*tx_en = 0;
7198 		return;
7199 	}
7200 
7201 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7202 		*rx_en = 1;
7203 		*tx_en = 0;
7204 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7205 		*tx_en = 1;
7206 		*rx_en = 0;
7207 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7208 		*rx_en = 1;
7209 		*tx_en = 1;
7210 	} else {
7211 		*rx_en = 0;
7212 		*tx_en = 0;
7213 	}
7214 }
7215 
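/* ethtool -A handler: autoneg changes must go through ethtool -s, and
 * pause settings are rejected while PFC is enabled. With autoneg off
 * the MAC is configured directly; otherwise renegotiation is started
 * on the attached PHY (the only autoneg case supported so far).
 */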
7216 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7217 				u32 rx_en, u32 tx_en)
7218 {
7219 	struct hclge_vport *vport = hclge_get_vport(handle);
7220 	struct hclge_dev *hdev = vport->back;
7221 	struct phy_device *phydev = hdev->hw.mac.phydev;
7222 	u32 fc_autoneg;
7223 
7224 	fc_autoneg = hclge_get_autoneg(handle);
7225 	if (auto_neg != fc_autoneg) {
7226 		dev_info(&hdev->pdev->dev,
7227 			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7228 		return -EOPNOTSUPP;
7229 	}
7230 
7231 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7232 		dev_info(&hdev->pdev->dev,
7233 			 "Priority flow control enabled. Cannot set link flow control.\n");
7234 		return -EOPNOTSUPP;
7235 	}
7236 
7237 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7238 
7239 	if (!fc_autoneg)
7240 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7241 
7242 	/* Only support flow control negotiation for netdev with
7243 	 * phy attached for now.
7244 	 */
7245 	if (!phydev)
7246 		return -EOPNOTSUPP;
7247 
7248 	return phy_start_aneg(phydev);
7249 }
7250 
7251 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7252 					  u8 *auto_neg, u32 *speed, u8 *duplex)
7253 {
7254 	struct hclge_vport *vport = hclge_get_vport(handle);
7255 	struct hclge_dev *hdev = vport->back;
7256 
7257 	if (speed)
7258 		*speed = hdev->hw.mac.speed;
7259 	if (duplex)
7260 		*duplex = hdev->hw.mac.duplex;
7261 	if (auto_neg)
7262 		*auto_neg = hdev->hw.mac.autoneg;
7263 }
7264 
7265 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
7266 {
7267 	struct hclge_vport *vport = hclge_get_vport(handle);
7268 	struct hclge_dev *hdev = vport->back;
7269 
7270 	if (media_type)
7271 		*media_type = hdev->hw.mac.media_type;
7272 }
7273 
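/* Report the MDI/MDI-X control and status of the attached PHY. The
 * registers live in the MDIX page, so the PHY is switched there and
 * then back to the copper page; the resolved status is only valid once
 * speed/duplex resolution has completed.
 */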
7274 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7275 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
7276 {
7277 	struct hclge_vport *vport = hclge_get_vport(handle);
7278 	struct hclge_dev *hdev = vport->back;
7279 	struct phy_device *phydev = hdev->hw.mac.phydev;
7280 	int mdix_ctrl, mdix, retval, is_resolved;
7281 
7282 	if (!phydev) {
7283 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7284 		*tp_mdix = ETH_TP_MDI_INVALID;
7285 		return;
7286 	}
7287 
7288 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7289 
7290 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7291 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7292 				    HCLGE_PHY_MDIX_CTRL_S);
7293 
7294 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7295 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7296 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7297 
7298 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7299 
7300 	switch (mdix_ctrl) {
7301 	case 0x0:
7302 		*tp_mdix_ctrl = ETH_TP_MDI;
7303 		break;
7304 	case 0x1:
7305 		*tp_mdix_ctrl = ETH_TP_MDI_X;
7306 		break;
7307 	case 0x3:
7308 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7309 		break;
7310 	default:
7311 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7312 		break;
7313 	}
7314 
7315 	if (!is_resolved)
7316 		*tp_mdix = ETH_TP_MDI_INVALID;
7317 	else if (mdix)
7318 		*tp_mdix = ETH_TP_MDI_X;
7319 	else
7320 		*tp_mdix = ETH_TP_MDI;
7321 }
7322 
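/* Bind a newly registered client (KNIC, UNIC or RoCE) to each vport
 * and initialize its instance. A RoCE instance is only brought up once
 * the NIC client is present and the device supports RoCE.
 */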
7323 static int hclge_init_client_instance(struct hnae3_client *client,
7324 				      struct hnae3_ae_dev *ae_dev)
7325 {
7326 	struct hclge_dev *hdev = ae_dev->priv;
7327 	struct hclge_vport *vport;
7328 	int i, ret;
7329 
7330 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7331 		vport = &hdev->vport[i];
7332 
7333 		switch (client->type) {
7334 		case HNAE3_CLIENT_KNIC:
7335 
7336 			hdev->nic_client = client;
7337 			vport->nic.client = client;
7338 			ret = client->ops->init_instance(&vport->nic);
7339 			if (ret)
7340 				goto clear_nic;
7341 
7342 			hnae3_set_client_init_flag(client, ae_dev, 1);
7343 
7344 			if (hdev->roce_client &&
7345 			    hnae3_dev_roce_supported(hdev)) {
7346 				struct hnae3_client *rc = hdev->roce_client;
7347 
7348 				ret = hclge_init_roce_base_info(vport);
7349 				if (ret)
7350 					goto clear_roce;
7351 
7352 				ret = rc->ops->init_instance(&vport->roce);
7353 				if (ret)
7354 					goto clear_roce;
7355 
7356 				hnae3_set_client_init_flag(hdev->roce_client,
7357 							   ae_dev, 1);
7358 			}
7359 
7360 			break;
7361 		case HNAE3_CLIENT_UNIC:
7362 			hdev->nic_client = client;
7363 			vport->nic.client = client;
7364 
7365 			ret = client->ops->init_instance(&vport->nic);
7366 			if (ret)
7367 				goto clear_nic;
7368 
7369 			hnae3_set_client_init_flag(client, ae_dev, 1);
7370 
7371 			break;
7372 		case HNAE3_CLIENT_ROCE:
7373 			if (hnae3_dev_roce_supported(hdev)) {
7374 				hdev->roce_client = client;
7375 				vport->roce.client = client;
7376 			}
7377 
7378 			if (hdev->roce_client && hdev->nic_client) {
7379 				ret = hclge_init_roce_base_info(vport);
7380 				if (ret)
7381 					goto clear_roce;
7382 
7383 				ret = client->ops->init_instance(&vport->roce);
7384 				if (ret)
7385 					goto clear_roce;
7386 
7387 				hnae3_set_client_init_flag(client, ae_dev, 1);
7388 			}
7389 
7390 			break;
7391 		default:
7392 			return -EINVAL;
7393 		}
7394 	}
7395 
7396 	return 0;
7397 
7398 clear_nic:
7399 	hdev->nic_client = NULL;
7400 	vport->nic.client = NULL;
7401 	return ret;
7402 clear_roce:
7403 	hdev->roce_client = NULL;
7404 	vport->roce.client = NULL;
7405 	return ret;
7406 }
7407 
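/* Tear down client instances on each vport: the RoCE instance is
 * removed first, and the NIC instance is kept when only the RoCE
 * client is being unregistered.
 */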
7408 static void hclge_uninit_client_instance(struct hnae3_client *client,
7409 					 struct hnae3_ae_dev *ae_dev)
7410 {
7411 	struct hclge_dev *hdev = ae_dev->priv;
7412 	struct hclge_vport *vport;
7413 	int i;
7414 
7415 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7416 		vport = &hdev->vport[i];
7417 		if (hdev->roce_client) {
7418 			hdev->roce_client->ops->uninit_instance(&vport->roce,
7419 								0);
7420 			hdev->roce_client = NULL;
7421 			vport->roce.client = NULL;
7422 		}
7423 		if (client->type == HNAE3_CLIENT_ROCE)
7424 			return;
7425 		if (hdev->nic_client && client->ops->uninit_instance) {
7426 			client->ops->uninit_instance(&vport->nic, 0);
7427 			hdev->nic_client = NULL;
7428 			vport->nic.client = NULL;
7429 		}
7430 	}
7431 }
7432 
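/* Enable the PCI device, set a 64-bit DMA mask (falling back to
 * 32 bits), claim the regions and map BAR2 as the register space.
 */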
7433 static int hclge_pci_init(struct hclge_dev *hdev)
7434 {
7435 	struct pci_dev *pdev = hdev->pdev;
7436 	struct hclge_hw *hw;
7437 	int ret;
7438 
7439 	ret = pci_enable_device(pdev);
7440 	if (ret) {
7441 		dev_err(&pdev->dev, "failed to enable PCI device\n");
7442 		return ret;
7443 	}
7444 
7445 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7446 	if (ret) {
7447 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7448 		if (ret) {
7449 			dev_err(&pdev->dev,
7450 				"can't set consistent PCI DMA");
7451 			goto err_disable_device;
7452 		}
7453 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7454 	}
7455 
7456 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7457 	if (ret) {
7458 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7459 		goto err_disable_device;
7460 	}
7461 
7462 	pci_set_master(pdev);
7463 	hw = &hdev->hw;
7464 	hw->io_base = pcim_iomap(pdev, 2, 0);
7465 	if (!hw->io_base) {
7466 		dev_err(&pdev->dev, "Can't map configuration register space\n");
7467 		ret = -ENOMEM;
7468 		goto err_clr_master;
7469 	}
7470 
7471 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7472 
7473 	return 0;
7474 err_clr_master:
7475 	pci_clear_master(pdev);
7476 	pci_release_regions(pdev);
7477 err_disable_device:
7478 	pci_disable_device(pdev);
7479 
7480 	return ret;
7481 }
7482 
7483 static void hclge_pci_uninit(struct hclge_dev *hdev)
7484 {
7485 	struct pci_dev *pdev = hdev->pdev;
7486 
7487 	pcim_iounmap(pdev, hdev->hw.io_base);
7488 	pci_free_irq_vectors(pdev);
7489 	pci_clear_master(pdev);
7490 	pci_release_mem_regions(pdev);
7491 	pci_disable_device(pdev);
7492 }
7493 
7494 static void hclge_state_init(struct hclge_dev *hdev)
7495 {
7496 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7497 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7498 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7499 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7500 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7501 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7502 }
7503 
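/* Mark the device down and stop all deferred work: the service and
 * reset timers plus the service, reset and mailbox work items.
 */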
7504 static void hclge_state_uninit(struct hclge_dev *hdev)
7505 {
7506 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7507 
7508 	if (hdev->service_timer.function)
7509 		del_timer_sync(&hdev->service_timer);
7510 	if (hdev->reset_timer.function)
7511 		del_timer_sync(&hdev->reset_timer);
7512 	if (hdev->service_task.func)
7513 		cancel_work_sync(&hdev->service_task);
7514 	if (hdev->rst_service_task.func)
7515 		cancel_work_sync(&hdev->rst_service_task);
7516 	if (hdev->mbx_service_task.func)
7517 		cancel_work_sync(&hdev->mbx_service_task);
7518 }
7519 
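/* Prepare for an FLR: request the FLR reset, kick the reset task and
 * poll until it has brought the function down, waiting at most
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS milliseconds.
 */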
7520 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7521 {
7522 #define HCLGE_FLR_WAIT_MS	100
7523 #define HCLGE_FLR_WAIT_CNT	50
7524 	struct hclge_dev *hdev = ae_dev->priv;
7525 	int cnt = 0;
7526 
7527 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7528 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7529 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7530 	hclge_reset_event(hdev->pdev, NULL);
7531 
7532 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7533 	       cnt++ < HCLGE_FLR_WAIT_CNT)
7534 		msleep(HCLGE_FLR_WAIT_MS);
7535 
7536 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7537 		dev_err(&hdev->pdev->dev,
7538 			"flr wait down timeout: %d\n", cnt);
7539 }
7540 
7541 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7542 {
7543 	struct hclge_dev *hdev = ae_dev->priv;
7544 
7545 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7546 }
7547 
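/* Probe-time initialization of the PF: PCI and command queue setup,
 * MSI/MSI-X and misc interrupt initialization, TQP/vport allocation,
 * MAC, VLAN, TM, RSS and flow director configuration, and setup of the
 * service timers and work items.
 */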
7548 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7549 {
7550 	struct pci_dev *pdev = ae_dev->pdev;
7551 	struct hclge_dev *hdev;
7552 	int ret;
7553 
7554 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7555 	if (!hdev) {
7556 		ret = -ENOMEM;
7557 		goto out;
7558 	}
7559 
7560 	hdev->pdev = pdev;
7561 	hdev->ae_dev = ae_dev;
7562 	hdev->reset_type = HNAE3_NONE_RESET;
7563 	hdev->reset_level = HNAE3_FUNC_RESET;
7564 	ae_dev->priv = hdev;
7565 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7566 
7567 	mutex_init(&hdev->vport_lock);
7568 	mutex_init(&hdev->vport_cfg_mutex);
7569 
7570 	ret = hclge_pci_init(hdev);
7571 	if (ret) {
7572 		dev_err(&pdev->dev, "PCI init failed\n");
7573 		goto out;
7574 	}
7575 
7576 	/* Firmware command queue initialization */
7577 	ret = hclge_cmd_queue_init(hdev);
7578 	if (ret) {
7579 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7580 		goto err_pci_uninit;
7581 	}
7582 
7583 	/* Firmware command initialization */
7584 	ret = hclge_cmd_init(hdev);
7585 	if (ret)
7586 		goto err_cmd_uninit;
7587 
7588 	ret = hclge_get_cap(hdev);
7589 	if (ret) {
7590 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7591 			ret);
7592 		goto err_cmd_uninit;
7593 	}
7594 
7595 	ret = hclge_configure(hdev);
7596 	if (ret) {
7597 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7598 		goto err_cmd_uninit;
7599 	}
7600 
7601 	ret = hclge_init_msi(hdev);
7602 	if (ret) {
7603 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7604 		goto err_cmd_uninit;
7605 	}
7606 
7607 	ret = hclge_misc_irq_init(hdev);
7608 	if (ret) {
7609 		dev_err(&pdev->dev,
7610 			"Misc IRQ(vector0) init error, ret = %d.\n",
7611 			ret);
7612 		goto err_msi_uninit;
7613 	}
7614 
7615 	ret = hclge_alloc_tqps(hdev);
7616 	if (ret) {
7617 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7618 		goto err_msi_irq_uninit;
7619 	}
7620 
7621 	ret = hclge_alloc_vport(hdev);
7622 	if (ret) {
7623 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7624 		goto err_msi_irq_uninit;
7625 	}
7626 
7627 	ret = hclge_map_tqp(hdev);
7628 	if (ret) {
7629 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7630 		goto err_msi_irq_uninit;
7631 	}
7632 
7633 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7634 		ret = hclge_mac_mdio_config(hdev);
7635 		if (ret) {
7636 			dev_err(&hdev->pdev->dev,
7637 				"mdio config fail ret=%d\n", ret);
7638 			goto err_msi_irq_uninit;
7639 		}
7640 	}
7641 
7642 	ret = hclge_init_umv_space(hdev);
7643 	if (ret) {
7644 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7645 		goto err_mdiobus_unreg;
7646 	}
7647 
7648 	ret = hclge_mac_init(hdev);
7649 	if (ret) {
7650 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7651 		goto err_mdiobus_unreg;
7652 	}
7653 
7654 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7655 	if (ret) {
7656 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7657 		goto err_mdiobus_unreg;
7658 	}
7659 
7660 	ret = hclge_config_gro(hdev, true);
7661 	if (ret)
7662 		goto err_mdiobus_unreg;
7663 
7664 	ret = hclge_init_vlan_config(hdev);
7665 	if (ret) {
7666 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7667 		goto err_mdiobus_unreg;
7668 	}
7669 
7670 	ret = hclge_tm_schd_init(hdev);
7671 	if (ret) {
7672 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7673 		goto err_mdiobus_unreg;
7674 	}
7675 
7676 	hclge_rss_init_cfg(hdev);
7677 	ret = hclge_rss_init_hw(hdev);
7678 	if (ret) {
7679 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7680 		goto err_mdiobus_unreg;
7681 	}
7682 
7683 	ret = init_mgr_tbl(hdev);
7684 	if (ret) {
7685 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7686 		goto err_mdiobus_unreg;
7687 	}
7688 
7689 	ret = hclge_init_fd_config(hdev);
7690 	if (ret) {
7691 		dev_err(&pdev->dev,
7692 			"fd table init fail, ret=%d\n", ret);
7693 		goto err_mdiobus_unreg;
7694 	}
7695 
7696 	ret = hclge_hw_error_set_state(hdev, true);
7697 	if (ret) {
7698 		dev_err(&pdev->dev,
7699 			"fail(%d) to enable hw error interrupts\n", ret);
7700 		goto err_mdiobus_unreg;
7701 	}
7702 
7703 	hclge_dcb_ops_set(hdev);
7704 
7705 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7706 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7707 	INIT_WORK(&hdev->service_task, hclge_service_task);
7708 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7709 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7710 
7711 	hclge_clear_all_event_cause(hdev);
7712 
7713 	/* Enable MISC vector(vector0) */
7714 	hclge_enable_vector(&hdev->misc_vector, true);
7715 
7716 	hclge_state_init(hdev);
7717 	hdev->last_reset_time = jiffies;
7718 
7719 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7720 	return 0;
7721 
7722 err_mdiobus_unreg:
7723 	if (hdev->hw.mac.phydev)
7724 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
7725 err_msi_irq_uninit:
7726 	hclge_misc_irq_uninit(hdev);
7727 err_msi_uninit:
7728 	pci_free_irq_vectors(pdev);
7729 err_cmd_uninit:
7730 	hclge_cmd_uninit(hdev);
7731 err_pci_uninit:
7732 	pcim_iounmap(pdev, hdev->hw.io_base);
7733 	pci_clear_master(pdev);
7734 	pci_release_regions(pdev);
7735 	pci_disable_device(pdev);
7736 out:
7737 	return ret;
7738 }
7739 
7740 static void hclge_stats_clear(struct hclge_dev *hdev)
7741 {
7742 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7743 }
7744 
7745 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7746 {
7747 	struct hclge_vport *vport = hdev->vport;
7748 	int i;
7749 
7750 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7751 		hclge_vport_stop(vport);
7752 		vport++;
7753 	}
7754 }
7755 
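/* Restore the hardware configuration after a reset: command queue, TQP
 * mapping, UMV space, MAC, TSO/GRO, VLAN, TM, RSS, flow director and
 * the hardware error interrupts, then put every vport back into the
 * stopped state.
 */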
7756 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7757 {
7758 	struct hclge_dev *hdev = ae_dev->priv;
7759 	struct pci_dev *pdev = ae_dev->pdev;
7760 	int ret;
7761 
7762 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7763 
7764 	hclge_stats_clear(hdev);
7765 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7766 
7767 	ret = hclge_cmd_init(hdev);
7768 	if (ret) {
7769 		dev_err(&pdev->dev, "Cmd queue init failed\n");
7770 		return ret;
7771 	}
7772 
7773 	ret = hclge_map_tqp(hdev);
7774 	if (ret) {
7775 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7776 		return ret;
7777 	}
7778 
7779 	hclge_reset_umv_space(hdev);
7780 
7781 	ret = hclge_mac_init(hdev);
7782 	if (ret) {
7783 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7784 		return ret;
7785 	}
7786 
7787 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7788 	if (ret) {
7789 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7790 		return ret;
7791 	}
7792 
7793 	ret = hclge_config_gro(hdev, true);
7794 	if (ret)
7795 		return ret;
7796 
7797 	ret = hclge_init_vlan_config(hdev);
7798 	if (ret) {
7799 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7800 		return ret;
7801 	}
7802 
7803 	ret = hclge_tm_init_hw(hdev, true);
7804 	if (ret) {
7805 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
7806 		return ret;
7807 	}
7808 
7809 	ret = hclge_rss_init_hw(hdev);
7810 	if (ret) {
7811 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7812 		return ret;
7813 	}
7814 
7815 	ret = hclge_init_fd_config(hdev);
7816 	if (ret) {
7817 		dev_err(&pdev->dev,
7818 			"fd table init fail, ret=%d\n", ret);
7819 		return ret;
7820 	}
7821 
7822 	/* Re-enable the hw error interrupts because
7823 	 * the interrupts get disabled on core/global reset.
7824 	 */
7825 	ret = hclge_hw_error_set_state(hdev, true);
7826 	if (ret) {
7827 		dev_err(&pdev->dev,
7828 			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
7829 		return ret;
7830 	}
7831 
7832 	hclge_reset_vport_state(hdev);
7833 
7834 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7835 		 HCLGE_DRIVER_NAME);
7836 
7837 	return 0;
7838 }
7839 
7840 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7841 {
7842 	struct hclge_dev *hdev = ae_dev->priv;
7843 	struct hclge_mac *mac = &hdev->hw.mac;
7844 
7845 	hclge_state_uninit(hdev);
7846 
7847 	if (mac->phydev)
7848 		mdiobus_unregister(mac->mdio_bus);
7849 
7850 	hclge_uninit_umv_space(hdev);
7851 
7852 	/* Disable MISC vector(vector0) */
7853 	hclge_enable_vector(&hdev->misc_vector, false);
7854 	synchronize_irq(hdev->misc_vector.vector_irq);
7855 
7856 	hclge_hw_error_set_state(hdev, false);
7857 	hclge_cmd_uninit(hdev);
7858 	hclge_misc_irq_uninit(hdev);
7859 	hclge_pci_uninit(hdev);
7860 	mutex_destroy(&hdev->vport_lock);
7861 	hclge_uninit_vport_mac_table(hdev);
7862 	hclge_uninit_vport_vlan_table(hdev);
7863 	mutex_destroy(&hdev->vport_cfg_mutex);
7864 	ae_dev->priv = NULL;
7865 }
7866 
7867 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7868 {
7869 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7870 	struct hclge_vport *vport = hclge_get_vport(handle);
7871 	struct hclge_dev *hdev = vport->back;
7872 
7873 	return min_t(u32, hdev->rss_size_max,
7874 		     vport->alloc_tqps / kinfo->num_tc);
7875 }
7876 
7877 static void hclge_get_channels(struct hnae3_handle *handle,
7878 			       struct ethtool_channels *ch)
7879 {
7880 	ch->max_combined = hclge_get_max_channels(handle);
7881 	ch->other_count = 1;
7882 	ch->max_other = 1;
7883 	ch->combined_count = handle->kinfo.rss_size;
7884 }
7885 
7886 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7887 					u16 *alloc_tqps, u16 *max_rss_size)
7888 {
7889 	struct hclge_vport *vport = hclge_get_vport(handle);
7890 	struct hclge_dev *hdev = vport->back;
7891 
7892 	*alloc_tqps = vport->alloc_tqps;
7893 	*max_rss_size = hdev->rss_size_max;
7894 }
7895 
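/* Handle a channel count change: update the requested rss_size, remap
 * the vport's TQPs, reprogram the RSS TC mode for the new size and
 * rebuild the RSS indirection table unless the user has already
 * configured it through ethtool.
 */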
7896 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
7897 			      bool rxfh_configured)
7898 {
7899 	struct hclge_vport *vport = hclge_get_vport(handle);
7900 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7901 	struct hclge_dev *hdev = vport->back;
7902 	int cur_rss_size = kinfo->rss_size;
7903 	int cur_tqps = kinfo->num_tqps;
7904 	u16 tc_offset[HCLGE_MAX_TC_NUM];
7905 	u16 tc_valid[HCLGE_MAX_TC_NUM];
7906 	u16 tc_size[HCLGE_MAX_TC_NUM];
7907 	u16 roundup_size;
7908 	u32 *rss_indir;
7909 	int ret, i;
7910 
7911 	kinfo->req_rss_size = new_tqps_num;
7912 
7913 	ret = hclge_tm_vport_map_update(hdev);
7914 	if (ret) {
7915 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
7916 		return ret;
7917 	}
7918 
7919 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
7920 	roundup_size = ilog2(roundup_size);
7921 	/* Set the RSS TC mode according to the new RSS size */
7922 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7923 		tc_valid[i] = 0;
7924 
7925 		if (!(hdev->hw_tc_map & BIT(i)))
7926 			continue;
7927 
7928 		tc_valid[i] = 1;
7929 		tc_size[i] = roundup_size;
7930 		tc_offset[i] = kinfo->rss_size * i;
7931 	}
7932 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7933 	if (ret)
7934 		return ret;
7935 
7936 	/* RSS indirection table has been configured by user */
7937 	if (rxfh_configured)
7938 		goto out;
7939 
7940 	/* Reinitializes the rss indirect table according to the new RSS size */
7941 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7942 	if (!rss_indir)
7943 		return -ENOMEM;
7944 
7945 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7946 		rss_indir[i] = i % kinfo->rss_size;
7947 
7948 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7949 	if (ret)
7950 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7951 			ret);
7952 
7953 	kfree(rss_indir);
7954 
7955 out:
7956 	if (!ret)
7957 		dev_info(&hdev->pdev->dev,
7958 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
7959 			 cur_rss_size, kinfo->rss_size,
7960 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
7961 
7962 	return ret;
7963 }
7964 
7965 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7966 			      u32 *regs_num_64_bit)
7967 {
7968 	struct hclge_desc desc;
7969 	u32 total_num;
7970 	int ret;
7971 
7972 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
7973 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7974 	if (ret) {
7975 		dev_err(&hdev->pdev->dev,
7976 			"Query register number cmd failed, ret = %d.\n", ret);
7977 		return ret;
7978 	}
7979 
7980 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
7981 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
7982 
7983 	total_num = *regs_num_32_bit + *regs_num_64_bit;
7984 	if (!total_num)
7985 		return -EINVAL;
7986 
7987 	return 0;
7988 }
7989 
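/* Read @regs_num 32-bit register values from firmware into @data. The
 * first descriptor returns values only in its data area (two slots
 * fewer than a full descriptor); the remaining descriptors are used
 * entirely as data, eight values each.
 */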
7990 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7991 				 void *data)
7992 {
7993 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
7994 
7995 	struct hclge_desc *desc;
7996 	u32 *reg_val = data;
7997 	__le32 *desc_data;
7998 	int cmd_num;
7999 	int i, k, n;
8000 	int ret;
8001 
8002 	if (regs_num == 0)
8003 		return 0;
8004 
8005 	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8006 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8007 	if (!desc)
8008 		return -ENOMEM;
8009 
8010 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8011 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8012 	if (ret) {
8013 		dev_err(&hdev->pdev->dev,
8014 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
8015 		kfree(desc);
8016 		return ret;
8017 	}
8018 
8019 	for (i = 0; i < cmd_num; i++) {
8020 		if (i == 0) {
8021 			desc_data = (__le32 *)(&desc[i].data[0]);
8022 			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8023 		} else {
8024 			desc_data = (__le32 *)(&desc[i]);
8025 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
8026 		}
8027 		for (k = 0; k < n; k++) {
8028 			*reg_val++ = le32_to_cpu(*desc_data++);
8029 
8030 			regs_num--;
8031 			if (!regs_num)
8032 				break;
8033 		}
8034 	}
8035 
8036 	kfree(desc);
8037 	return 0;
8038 }
8039 
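/* Read @regs_num 64-bit register values from firmware into @data. The
 * first descriptor returns one value fewer than a full descriptor; the
 * remaining descriptors are used entirely as data, four values each.
 */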
8040 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8041 				 void *data)
8042 {
8043 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8044 
8045 	struct hclge_desc *desc;
8046 	u64 *reg_val = data;
8047 	__le64 *desc_data;
8048 	int cmd_num;
8049 	int i, k, n;
8050 	int ret;
8051 
8052 	if (regs_num == 0)
8053 		return 0;
8054 
8055 	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8056 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8057 	if (!desc)
8058 		return -ENOMEM;
8059 
8060 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8061 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8062 	if (ret) {
8063 		dev_err(&hdev->pdev->dev,
8064 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
8065 		kfree(desc);
8066 		return ret;
8067 	}
8068 
8069 	for (i = 0; i < cmd_num; i++) {
8070 		if (i == 0) {
8071 			desc_data = (__le64 *)(&desc[i].data[0]);
8072 			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8073 		} else {
8074 			desc_data = (__le64 *)(&desc[i]);
8075 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
8076 		}
8077 		for (k = 0; k < n; k++) {
8078 			*reg_val++ = le64_to_cpu(*desc_data++);
8079 
8080 			regs_num--;
8081 			if (!regs_num)
8082 				break;
8083 		}
8084 	}
8085 
8086 	kfree(desc);
8087 	return 0;
8088 }
8089 
8090 #define MAX_SEPARATE_NUM	4
8091 #define SEPARATOR_VALUE		0xFFFFFFFF
8092 #define REG_NUM_PER_LINE	4
8093 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
8094 
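/* The register dump length covers the directly read register lists
 * (each padded to a full line with separators), with the ring
 * registers repeated per TQP and the TQP interrupt registers repeated
 * per used vector, plus the 32-bit and 64-bit registers queried from
 * firmware.
 */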
8095 static int hclge_get_regs_len(struct hnae3_handle *handle)
8096 {
8097 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8098 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8099 	struct hclge_vport *vport = hclge_get_vport(handle);
8100 	struct hclge_dev *hdev = vport->back;
8101 	u32 regs_num_32_bit, regs_num_64_bit;
8102 	int ret;
8103 
8104 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8105 	if (ret) {
8106 		dev_err(&hdev->pdev->dev,
8107 			"Get register number failed, ret = %d.\n", ret);
8108 		return -EOPNOTSUPP;
8109 	}
8110 
8111 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8112 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8113 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8114 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8115 
8116 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8117 		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8118 		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8119 }
8120 
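/* Fill @data with the dump sized by hclge_get_regs_len(): direct reads
 * from the PF register space first, each group padded with
 * SEPARATOR_VALUE, followed by the 32-bit and 64-bit registers
 * returned by firmware.
 */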
8121 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8122 			   void *data)
8123 {
8124 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8125 	struct hclge_vport *vport = hclge_get_vport(handle);
8126 	struct hclge_dev *hdev = vport->back;
8127 	u32 regs_num_32_bit, regs_num_64_bit;
8128 	int i, j, reg_um, separator_num;
8129 	u32 *reg = data;
8130 	int ret;
8131 
8132 	*version = hdev->fw_version;
8133 
8134 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8135 	if (ret) {
8136 		dev_err(&hdev->pdev->dev,
8137 			"Get register number failed, ret = %d.\n", ret);
8138 		return;
8139 	}
8140 
8141 	/* fetching per-PF register values from PF PCIe register space */
8142 	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8143 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8144 	for (i = 0; i < reg_um; i++)
8145 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8146 	for (i = 0; i < separator_num; i++)
8147 		*reg++ = SEPARATOR_VALUE;
8148 
8149 	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
8150 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8151 	for (i = 0; i < reg_um; i++)
8152 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8153 	for (i = 0; i < separator_num; i++)
8154 		*reg++ = SEPARATOR_VALUE;
8155 
8156 	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
8157 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8158 	for (j = 0; j < kinfo->num_tqps; j++) {
8159 		for (i = 0; i < reg_um; i++)
8160 			*reg++ = hclge_read_dev(&hdev->hw,
8161 						ring_reg_addr_list[i] +
8162 						0x200 * j);
8163 		for (i = 0; i < separator_num; i++)
8164 			*reg++ = SEPARATOR_VALUE;
8165 	}
8166 
8167 	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8168 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8169 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
8170 		for (i = 0; i < reg_um; i++)
8171 			*reg++ = hclge_read_dev(&hdev->hw,
8172 						tqp_intr_reg_addr_list[i] +
8173 						4 * j);
8174 		for (i = 0; i < separator_num; i++)
8175 			*reg++ = SEPARATOR_VALUE;
8176 	}
8177 
8178 	/* fetching PF common register values from firmware */
8179 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
8180 	if (ret) {
8181 		dev_err(&hdev->pdev->dev,
8182 			"Get 32 bit register failed, ret = %d.\n", ret);
8183 		return;
8184 	}
8185 
8186 	reg += regs_num_32_bit;
8187 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
8188 	if (ret)
8189 		dev_err(&hdev->pdev->dev,
8190 			"Get 64 bit register failed, ret = %d.\n", ret);
8191 }
8192 
8193 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
8194 {
8195 	struct hclge_set_led_state_cmd *req;
8196 	struct hclge_desc desc;
8197 	int ret;
8198 
8199 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8200 
8201 	req = (struct hclge_set_led_state_cmd *)desc.data;
8202 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8203 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
8204 
8205 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8206 	if (ret)
8207 		dev_err(&hdev->pdev->dev,
8208 			"Send set led state cmd error, ret =%d\n", ret);
8209 
8210 	return ret;
8211 }
8212 
8213 enum hclge_led_status {
8214 	HCLGE_LED_OFF,
8215 	HCLGE_LED_ON,
8216 	HCLGE_LED_NO_CHANGE = 0xFF,
8217 };
8218 
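/* ethtool 'identify' (-p) handler: turn the locate LED on or off. */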
8219 static int hclge_set_led_id(struct hnae3_handle *handle,
8220 			    enum ethtool_phys_id_state status)
8221 {
8222 	struct hclge_vport *vport = hclge_get_vport(handle);
8223 	struct hclge_dev *hdev = vport->back;
8224 
8225 	switch (status) {
8226 	case ETHTOOL_ID_ACTIVE:
8227 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
8228 	case ETHTOOL_ID_INACTIVE:
8229 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
8230 	default:
8231 		return -EINVAL;
8232 	}
8233 }
8234 
8235 static void hclge_get_link_mode(struct hnae3_handle *handle,
8236 				unsigned long *supported,
8237 				unsigned long *advertising)
8238 {
8239 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8240 	struct hclge_vport *vport = hclge_get_vport(handle);
8241 	struct hclge_dev *hdev = vport->back;
8242 	unsigned int idx = 0;
8243 
8244 	for (; idx < size; idx++) {
8245 		supported[idx] = hdev->hw.mac.supported[idx];
8246 		advertising[idx] = hdev->hw.mac.advertising[idx];
8247 	}
8248 }
8249 
8250 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
8251 {
8252 	struct hclge_vport *vport = hclge_get_vport(handle);
8253 	struct hclge_dev *hdev = vport->back;
8254 
8255 	return hclge_config_gro(hdev, enable);
8256 }
8257 
8258 static const struct hnae3_ae_ops hclge_ops = {
8259 	.init_ae_dev = hclge_init_ae_dev,
8260 	.uninit_ae_dev = hclge_uninit_ae_dev,
8261 	.flr_prepare = hclge_flr_prepare,
8262 	.flr_done = hclge_flr_done,
8263 	.init_client_instance = hclge_init_client_instance,
8264 	.uninit_client_instance = hclge_uninit_client_instance,
8265 	.map_ring_to_vector = hclge_map_ring_to_vector,
8266 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
8267 	.get_vector = hclge_get_vector,
8268 	.put_vector = hclge_put_vector,
8269 	.set_promisc_mode = hclge_set_promisc_mode,
8270 	.set_loopback = hclge_set_loopback,
8271 	.start = hclge_ae_start,
8272 	.stop = hclge_ae_stop,
8273 	.client_start = hclge_client_start,
8274 	.client_stop = hclge_client_stop,
8275 	.get_status = hclge_get_status,
8276 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
8277 	.update_speed_duplex_h = hclge_update_speed_duplex_h,
8278 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8279 	.get_media_type = hclge_get_media_type,
8280 	.get_rss_key_size = hclge_get_rss_key_size,
8281 	.get_rss_indir_size = hclge_get_rss_indir_size,
8282 	.get_rss = hclge_get_rss,
8283 	.set_rss = hclge_set_rss,
8284 	.set_rss_tuple = hclge_set_rss_tuple,
8285 	.get_rss_tuple = hclge_get_rss_tuple,
8286 	.get_tc_size = hclge_get_tc_size,
8287 	.get_mac_addr = hclge_get_mac_addr,
8288 	.set_mac_addr = hclge_set_mac_addr,
8289 	.do_ioctl = hclge_do_ioctl,
8290 	.add_uc_addr = hclge_add_uc_addr,
8291 	.rm_uc_addr = hclge_rm_uc_addr,
8292 	.add_mc_addr = hclge_add_mc_addr,
8293 	.rm_mc_addr = hclge_rm_mc_addr,
8294 	.set_autoneg = hclge_set_autoneg,
8295 	.get_autoneg = hclge_get_autoneg,
8296 	.get_pauseparam = hclge_get_pauseparam,
8297 	.set_pauseparam = hclge_set_pauseparam,
8298 	.set_mtu = hclge_set_mtu,
8299 	.reset_queue = hclge_reset_tqp,
8300 	.get_stats = hclge_get_stats,
8301 	.update_stats = hclge_update_stats,
8302 	.get_strings = hclge_get_strings,
8303 	.get_sset_count = hclge_get_sset_count,
8304 	.get_fw_version = hclge_get_fw_version,
8305 	.get_mdix_mode = hclge_get_mdix_mode,
8306 	.enable_vlan_filter = hclge_enable_vlan_filter,
8307 	.set_vlan_filter = hclge_set_vlan_filter,
8308 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8309 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8310 	.reset_event = hclge_reset_event,
8311 	.set_default_reset_request = hclge_set_def_reset_request,
8312 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8313 	.set_channels = hclge_set_channels,
8314 	.get_channels = hclge_get_channels,
8315 	.get_regs_len = hclge_get_regs_len,
8316 	.get_regs = hclge_get_regs,
8317 	.set_led_id = hclge_set_led_id,
8318 	.get_link_mode = hclge_get_link_mode,
8319 	.add_fd_entry = hclge_add_fd_entry,
8320 	.del_fd_entry = hclge_del_fd_entry,
8321 	.del_all_fd_entries = hclge_del_all_fd_entries,
8322 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8323 	.get_fd_rule_info = hclge_get_fd_rule_info,
8324 	.get_fd_all_rules = hclge_get_all_rules,
8325 	.restore_fd_rules = hclge_restore_fd_entries,
8326 	.enable_fd = hclge_enable_fd,
8327 	.dbg_run_cmd = hclge_dbg_run_cmd,
8328 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
8329 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
8330 	.ae_dev_resetting = hclge_ae_dev_resetting,
8331 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8332 	.set_gro_en = hclge_gro_en,
8333 	.get_global_queue_id = hclge_covert_handle_qid_global,
8334 	.set_timer_task = hclge_set_timer_task,
8335 	.mac_connect_phy = hclge_mac_connect_phy,
8336 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
8337 };
8338 
8339 static struct hnae3_ae_algo ae_algo = {
8340 	.ops = &hclge_ops,
8341 	.pdev_id_table = ae_algo_pci_tbl,
8342 };
8343 
8344 static int hclge_init(void)
8345 {
8346 	pr_info("%s is initializing\n", HCLGE_NAME);
8347 
8348 	hnae3_register_ae_algo(&ae_algo);
8349 
8350 	return 0;
8351 }
8352 
8353 static void hclge_exit(void)
8354 {
8355 	hnae3_unregister_ae_algo(&ae_algo);
8356 }
8357 module_init(hclge_init);
8358 module_exit(hclge_exit);
8359 
8360 MODULE_LICENSE("GPL");
8361 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8362 MODULE_DESCRIPTION("HCLGE Driver");
8363 MODULE_VERSION(HCLGE_MOD_VERSION);
8364